hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ee4a5d0ab3d83e9785962d718d4dbd60517468b
| 36
|
py
|
Python
|
project/__init__.py
|
gunater/calculation-of-energy-levels
|
587aef1c138e77bde52bb9df20b74bd970ad2adf
|
[
"MIT"
] | null | null | null |
project/__init__.py
|
gunater/calculation-of-energy-levels
|
587aef1c138e77bde52bb9df20b74bd970ad2adf
|
[
"MIT"
] | null | null | null |
project/__init__.py
|
gunater/calculation-of-energy-levels
|
587aef1c138e77bde52bb9df20b74bd970ad2adf
|
[
"MIT"
] | null | null | null |
def __version__():
return '0.1'
| 12
| 18
| 0.611111
| 5
| 36
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.222222
| 36
| 2
| 19
| 18
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
0efcc78335f8edce1c003674830623823ed09499
| 501
|
py
|
Python
|
Print/printResiResnNameB4ResiX.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Print/printResiResnNameB4ResiX.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Print/printResiResnNameB4ResiX.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
# Description: Print resn, resi, atom name, and b-factor.
# Source: placeHolder
"""
cmd.do('Bfac_dict = { "Bfactors3" : [] };cmd.iterate("(${1:resi 133})","Bfactors3.append((resn,resi,name, b))", space=Bfac_dict); for i,j,k,l in Bfac_dict["Bfactors3"]: print("%s %s %s %.2f" % (i,j,k,l));')
"""
cmd.do('Bfac_dict = { "Bfactors3" : [] ;cmd.iterate("(resi 133)","Bfactors3.append((resn,resi,name, b))", space=Bfac_dict); for i,j,k,l in Bfac_dict["Bfactors3"]: print("%s %s %s %.2f" % (i,j,k,l));')
| 55.666667
| 206
| 0.610778
| 84
| 501
| 3.571429
| 0.345238
| 0.16
| 0.226667
| 0.053333
| 0.8
| 0.8
| 0.8
| 0.586667
| 0.586667
| 0.586667
| 0
| 0.033937
| 0.117764
| 501
| 8
| 207
| 62.625
| 0.644796
| 0.570858
| 0
| 0
| 0
| 1
| 0.92233
| 0.305825
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
1607c250189f354a39a4f3ea4437d491e337d430
| 964
|
py
|
Python
|
episode-3-comparison-subtraction/comparison-test.py
|
nickelcarbide/arbitrary-precision-arithmetic-demo
|
7f9576ff7401c0d638553e7e7412e275569d6fd9
|
[
"Unlicense"
] | null | null | null |
episode-3-comparison-subtraction/comparison-test.py
|
nickelcarbide/arbitrary-precision-arithmetic-demo
|
7f9576ff7401c0d638553e7e7412e275569d6fd9
|
[
"Unlicense"
] | null | null | null |
episode-3-comparison-subtraction/comparison-test.py
|
nickelcarbide/arbitrary-precision-arithmetic-demo
|
7f9576ff7401c0d638553e7e7412e275569d6fd9
|
[
"Unlicense"
] | null | null | null |
from biguint import BigUInt
x = BigUInt()
x.from_py_int(5_000_000_000_000)
print("x =", x)
print("x.digits =", x.digits)
y = BigUInt()
y.from_py_int(123_456_789)
print("y =", y)
print("y.digits =", y.digits)
x.compare(y)
print("comparison =", x.compare(y))
print()
x.from_py_int(555_555)
print("x =", x)
print("x.digits =", x.digits)
y.from_py_int(8_132_129_309_329_813_280_982)
print("y =", y)
print("y.digits =", y.digits)
x.compare(y)
print("comparison =", x.compare(y))
print()
x.from_py_int(2_000_000_000)
print("x =", x)
print("x.digits =", x.digits)
y.from_py_int(6_000_000_000)
print("y =", y)
print("y.digits =", y.digits)
x.compare(y)
print("comparison =", x.compare(y))
print()
x.from_py_int(9_999_999_999_999)
print("x =", x)
print("y.digits =", x.digits)
y.from_py_int(9_999_999_999_999)
print("y =", y)
print("y.digits =", y.digits)
x.compare(y)
print("comparison =", x.compare(y))
| 17.851852
| 45
| 0.637967
| 175
| 964
| 3.262857
| 0.165714
| 0.115587
| 0.126095
| 0.171629
| 0.791594
| 0.791594
| 0.791594
| 0.767075
| 0.767075
| 0.697023
| 0
| 0.118372
| 0.158714
| 964
| 53
| 46
| 18.188679
| 0.585697
| 0
| 0
| 0.684211
| 0
| 0
| 0.16685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.026316
| 0
| 0.026316
| 0.605263
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
162874e033caf08ca8e56ec619c25e859083ec0a
| 34,024
|
py
|
Python
|
infoblox_netmri/api/broker/v3_8_0/if_setting_broker.py
|
infobloxopen/infoblox_netmri
|
aa1c744df7e439dbe163bb9edd165e4e85a9771b
|
[
"Apache-2.0"
] | 12
|
2016-02-19T12:37:54.000Z
|
2022-03-04T20:11:08.000Z
|
infoblox_netmri/api/broker/v3_8_0/if_setting_broker.py
|
azinfoblox/infoblox-netmri
|
02372c5231e2677ab6299cb659a73c9a41b4b0f4
|
[
"Apache-2.0"
] | 18
|
2015-11-12T18:37:00.000Z
|
2021-05-19T07:59:55.000Z
|
infoblox_netmri/api/broker/v3_8_0/if_setting_broker.py
|
azinfoblox/infoblox-netmri
|
02372c5231e2677ab6299cb659a73c9a41b4b0f4
|
[
"Apache-2.0"
] | 18
|
2016-01-07T12:04:34.000Z
|
2022-03-31T11:05:41.000Z
|
from ..broker import Broker
class IfSettingBroker(Broker):
controller = "if_settings"
def index(self, **kwargs):
"""Lists the available if settings. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for device.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for device.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for interface setting.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for interface setting.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, DeviceID, ifIndex, InterfaceID, CollectionInd, created_at, updated_at, SPMExcludedInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfSetting. Valid values are id, DeviceID, ifIndex, InterfaceID, CollectionInd, created_at, updated_at, SPMExcludedInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_settings: An array of the IfSetting objects that match the specified input criteria.
:rtype if_settings: Array of IfSetting
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available if settings matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for device.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for device.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifIndex: The index of interface.
:type ifIndex: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifIndex: The index of interface.
:type ifIndex: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, DeviceID, ifIndex, InterfaceID, CollectionInd, created_at, updated_at, SPMExcludedInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfSetting. Valid values are id, DeviceID, ifIndex, InterfaceID, CollectionInd, created_at, updated_at, SPMExcludedInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against if settings, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: CollectionInd, DeviceID, InterfaceID, SPMExcludedInd, created_at, id, ifIndex, updated_at.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_settings: An array of the IfSetting objects that match the specified input criteria.
:rtype if_settings: Array of IfSetting
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available if settings matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: CollectionInd, DeviceID, InterfaceID, SPMExcludedInd, created_at, id, ifIndex, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CollectionInd: The operator to apply to the field CollectionInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CollectionInd: A flag indicating if data collection enabled on this interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CollectionInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CollectionInd: If op_CollectionInd is specified, the field named in this input will be compared to the value in CollectionInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CollectionInd must be specified if op_CollectionInd is specified.
:type val_f_CollectionInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CollectionInd: If op_CollectionInd is specified, this value will be compared to the value in CollectionInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CollectionInd must be specified if op_CollectionInd is specified.
:type val_c_CollectionInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SPMExcludedInd: The operator to apply to the field SPMExcludedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SPMExcludedInd: A flag indicating if interface excluded from Switch Port Management. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SPMExcludedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SPMExcludedInd: If op_SPMExcludedInd is specified, the field named in this input will be compared to the value in SPMExcludedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SPMExcludedInd must be specified if op_SPMExcludedInd is specified.
:type val_f_SPMExcludedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SPMExcludedInd: If op_SPMExcludedInd is specified, this value will be compared to the value in SPMExcludedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SPMExcludedInd must be specified if op_SPMExcludedInd is specified.
:type val_c_SPMExcludedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for interface setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifIndex: The operator to apply to the field ifIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifIndex: The index of interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifIndex: If op_ifIndex is specified, the field named in this input will be compared to the value in ifIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifIndex must be specified if op_ifIndex is specified.
:type val_f_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifIndex: If op_ifIndex is specified, this value will be compared to the value in ifIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifIndex must be specified if op_ifIndex is specified.
:type val_c_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, DeviceID, ifIndex, InterfaceID, CollectionInd, created_at, updated_at, SPMExcludedInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfSetting. Valid values are id, DeviceID, ifIndex, InterfaceID, CollectionInd, created_at, updated_at, SPMExcludedInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_settings: An array of the IfSetting objects that match the specified input criteria.
:rtype if_settings: Array of IfSetting
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def show(self, **kwargs):
    """Shows the details for the specified if setting.

    **Inputs**

    :param id: The internal NetMRI identifier for interface setting. Required.
    :type id: Integer

    **Outputs**

    :return if_setting: The if setting identified by the specified id.
    :rtype if_setting: IfSetting
    """
    method_name = self._get_method_fullname("show")
    return self.api_request(method_name, kwargs)
def create(self, **kwargs):
    """Creates a new if setting.

    **Inputs**

    :param DeviceID: The internal NetMRI identifier for device. Required.
    :type DeviceID: Integer
    :param ifIndex: The index of interface. Required.
    :type ifIndex: Integer
    :param CollectionInd: A flag indicating if data collection enabled on this interface. Optional; default 1.
    :type CollectionInd: Boolean
    :param SPMExcludedInd: A flag indicating if interface excluded from Switch Port Management. Optional; default 0.
    :type SPMExcludedInd: Boolean

    **Outputs**

    :return id: The id of the newly created if setting.
    :rtype id: Integer
    :return model: The class name of the newly created if setting.
    :rtype model: String
    :return uri: A URI that may be used to retrieve the newly created if setting.
    :rtype uri: String
    :return if_setting: The newly created if setting.
    :rtype if_setting: IfSetting
    """
    method_name = self._get_method_fullname("create")
    return self.api_request(method_name, kwargs)
def update(self, **kwargs):
    """Updates an existing if setting.

    **Inputs**

    :param id: The internal NetMRI identifier for interface setting. Required.
    :type id: Integer
    :param DeviceID: The internal NetMRI identifier for device. If omitted, this field will not be updated.
    :type DeviceID: Integer
    :param ifIndex: The index of interface. If omitted, this field will not be updated.
    :type ifIndex: Integer
    :param CollectionInd: A flag indicating if data collection enabled on this interface. If omitted, this field will be updated to the default value (1).
    :type CollectionInd: Boolean
    :param SPMExcludedInd: A flag indicating if interface excluded from Switch Port Management. If omitted, this field will be updated to the default value (0).
    :type SPMExcludedInd: Boolean

    **Outputs**

    :return id: The id of the updated if setting.
    :rtype id: Integer
    :return model: The class name of the updated if setting.
    :rtype model: String
    :return uri: A URI that may be used to retrieve the updated if setting.
    :rtype uri: String
    :return if_setting: The updated if setting.
    :rtype if_setting: IfSetting
    """
    method_name = self._get_method_fullname("update")
    return self.api_request(method_name, kwargs)
def destroy(self, **kwargs):
    """Deletes the specified if setting from NetMRI.

    **Inputs**

    :param id: The internal NetMRI identifier for interface setting. Required.
    :type id: Integer

    **Outputs**

    None.
    """
    method_name = self._get_method_fullname("destroy")
    return self.api_request(method_name, kwargs)
| 47.586014
| 499
| 0.587438
| 4,223
| 34,024
| 4.672508
| 0.064883
| 0.080073
| 0.052047
| 0.064616
| 0.921194
| 0.919015
| 0.901784
| 0.887949
| 0.873505
| 0.870059
| 0
| 0.004655
| 0.324389
| 34,024
| 714
| 500
| 47.652661
| 0.853743
| 0.79635
| 0
| 0
| 0
| 0
| 0.052183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.411765
| false
| 0
| 0.058824
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
162a08d6f43ad28ba907b0f023d761992eadd58d
| 115
|
py
|
Python
|
utils.py
|
AhmedAlaa2024/Heal-and-Cure
|
c5db8f0e87cfe2f7394bc7f84c9ef490ceedf7b4
|
[
"MIT"
] | 1
|
2022-01-16T16:37:53.000Z
|
2022-01-16T16:37:53.000Z
|
utils.py
|
AhmedAlaa2024/Heal-and-Cure
|
c5db8f0e87cfe2f7394bc7f84c9ef490ceedf7b4
|
[
"MIT"
] | null | null | null |
utils.py
|
AhmedAlaa2024/Heal-and-Cure
|
c5db8f0e87cfe2f7394bc7f84c9ef490ceedf7b4
|
[
"MIT"
] | null | null | null |
from datetime import datetime
def get_time_now_as_text():
    """Return the current local time formatted as ``MM/DD/YYYY, HH:MM:SS``."""
    current_moment = datetime.now()
    return current_moment.strftime("%m/%d/%Y, %H:%M:%S")
| 28.75
| 56
| 0.695652
| 20
| 115
| 3.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113043
| 115
| 4
| 56
| 28.75
| 0.745098
| 0
| 0
| 0
| 0
| 0
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
162e3097de09eb43e801b12bade676ee7e177327
| 16,200
|
py
|
Python
|
panoptic_benchmark/engine/combine_methods.py
|
RAPNet/RAP
|
83662d8d44190f2f32cb2a455881b74a76e782c0
|
[
"MIT"
] | 4
|
2020-03-22T12:29:45.000Z
|
2022-01-30T03:27:50.000Z
|
panoptic_benchmark/engine/combine_methods.py
|
RAPNet/RAP
|
83662d8d44190f2f32cb2a455881b74a76e782c0
|
[
"MIT"
] | null | null | null |
panoptic_benchmark/engine/combine_methods.py
|
RAPNet/RAP
|
83662d8d44190f2f32cb2a455881b74a76e782c0
|
[
"MIT"
] | null | null | null |
import os
import PIL.Image as Image
from pycocotools import mask as COCOmask
from ..utils.pan_utils import IdGenerator, id2rgb, save_json
import numpy as np
import cv2
import copy
import torch
def combine_to_panoptic_heuristic(cfg, img_info, inst_results, sem_results, id_generator, segmentations_folder):
    """Fuse instance and semantic predictions into one panoptic id map.

    Score-priority heuristic: instances are placed in descending score
    order, each masked against the union of previously placed instances;
    semantic ("stuff") regions then fill whatever is still unlabeled.
    The id map is saved as a PNG under ``segmentations_folder`` and a
    COCO-panoptic-style annotation dict is returned.

    NOTE(review): entries of ``inst_results``/``sem_results`` are assumed
    to carry a COCO RLE under 'segmentation' plus 'score'/'label' keys —
    confirm against the caller.
    """
    overlap_thr = cfg.MODEL.SEMANTIC.OVERLAP_THR
    confidence_thr = cfg.MODEL.SEMANTIC.CONFIDENCE_THR
    stuff_area_limit = cfg.MODEL.SEMANTIC.STUFF_AREA_LIMIT
    # remove instance with score less than confidence_thr
    inst_results = [inst for inst in inst_results if not inst["score"] < confidence_thr]
    # sort instance by score (best first)
    inst_results = sorted(inst_results, key=lambda el: -el['score'])
    # construct panoptic segmentation image; 0 means "unassigned"
    pan_segm_id = np.zeros((img_info['height'], img_info['width']), dtype=np.uint32)
    used = None  # running RLE union of all accepted instance masks
    annotation = {}
    try:
        annotation['image_id'] = int(img_info["id"])
    except Exception:
        # non-numeric image ids are kept as-is
        annotation['image_id'] = img_info["id"]
    annotation['file_name'] = img_info["file_name"].replace('.jpg', '.png')
    segments_info = []
    # --- combine instance one by one
    for ann in inst_results:
        area = COCOmask.area(ann['segmentation'])
        if area == 0:
            continue
        if used is None:
            intersect = 0
            used = copy.deepcopy(ann['segmentation'])
        else:
            intersect = COCOmask.area(
                COCOmask.merge([used, ann['segmentation']], intersect=True)
            )
        # skip instances mostly hidden behind already-placed ones
        if intersect / area > overlap_thr:
            continue
        used = COCOmask.merge([used, ann['segmentation']], intersect=False)
        mask = COCOmask.decode(ann['segmentation']) == 1
        if intersect != 0:
            # only paint pixels not claimed by a higher-scoring instance
            mask = np.logical_and(pan_segm_id == 0, mask)
        segment_id = id_generator.get_id(ann['label'])
        pan_segm_id[mask] = segment_id
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = ann['label']
        segments_info.append(panoptic_ann)
    # --- combine semantic area one by one
    for ann in sem_results:
        mask = COCOmask.decode(ann['segmentation']) == 1
        mask_left = np.logical_and(pan_segm_id == 0, mask)
        # discard tiny leftover stuff regions
        if mask_left.sum() < stuff_area_limit:
            continue
        segment_id = id_generator.get_id(ann['label'])
        pan_segm_id[mask_left] = segment_id
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = ann['label']
        segments_info.append(panoptic_ann)
    annotation['segments_info'] = segments_info
    Image.fromarray(id2rgb(pan_segm_id)).save(os.path.join(segmentations_folder, annotation['file_name']))
    return annotation
def combine_to_panoptic_RAP(cfg, img_info, inst_results, sem_results, id_generator, segmentations_folder):
    """Fuse predictions into a panoptic id map using relation values (RAP).

    Unlike the score-priority heuristic, each pixel is won by the
    instance with the *lowest* 'relation_val' (presumably "closer to the
    viewer" — TODO confirm), so a later, lower-scoring instance may
    overwrite part of an earlier one.  Candidates that nearly coincide
    with an already-placed segment (both overlap ratios >=
    LARGE_OVERLAP_THR) are dropped as duplicates.  Stuff regions fill
    the remaining unlabeled pixels.  Writes the id map PNG and returns a
    COCO-panoptic-style annotation dict.
    """
    overlap_thr = cfg.MODEL.SEMANTIC.OVERLAP_THR
    confidence_thr = cfg.MODEL.SEMANTIC.CONFIDENCE_THR
    stuff_area_limit = cfg.MODEL.SEMANTIC.STUFF_AREA_LIMIT
    large_overlap_thr = cfg.MODEL.SEMANTIC.LARGE_OVERLAP_THR
    # remove instance with score less than confidence_thr
    inst_results = [inst for inst in inst_results if not inst["score"] < confidence_thr]
    # sort instance by score (best first)
    inst_results = sorted(inst_results, key=lambda el: -el['score'])
    # construct panoptic segmentation image; 0 means "unassigned"
    pan_segm_id = np.zeros((img_info['height'], img_info['width']), dtype=np.uint32)
    used_label = np.zeros((img_info['height'], img_info['width']), np.int32)
    # per-pixel relation value of the current owner; Inf = unowned,
    # so any instance can claim a fresh pixel
    used_relation_vals = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)
    used_relation_vals[:, :] = float("Inf")
    annotation = {}
    try:
        annotation['image_id'] = int(img_info["id"])
    except Exception:
        annotation['image_id'] = img_info["id"]
    annotation['file_name'] = img_info["file_name"].replace('.jpg', '.png')
    segments_info = []
    # --- combine instance one by one
    for ann in inst_results:
        area = COCOmask.area(ann['segmentation'])
        mask = COCOmask.decode(ann['segmentation']) == 1
        if area == 0:
            continue
        intersect_mask = np.logical_and(pan_segm_id > 0, mask)
        same_label_intersect_mask = np.logical_and(intersect_mask, used_label == ann["label"])
        intersect = np.sum(intersect_mask)
        same_label_intersect = np.sum(same_label_intersect_mask)
        # too much overlap with same-class pixels -> likely a duplicate
        if same_label_intersect / area > overlap_thr:
            continue
        is_valid = True
        if intersect != 0:
            intersect_ids = np.unique(pan_segm_id[intersect_mask])
            for intersect_id in intersect_ids:
                id_area = np.sum(pan_segm_id == intersect_id)
                intersect_id_mask = np.logical_and((pan_segm_id == intersect_id), intersect_mask)
                intersect_id_area = np.sum(intersect_id_mask)
                # near-total mutual overlap with an existing segment:
                # treat this candidate as a duplicate detection
                if intersect_id_area / area >= large_overlap_thr and intersect_id_area / id_area >= large_overlap_thr:
                    is_valid = False
                    break
            # keep only the pixels this instance wins, i.e. where its
            # relation value beats (is lower than) the current owner's
            mask = np.logical_and(used_relation_vals > ann["relation_val"], mask)
        if not is_valid:
            continue
        # too few winnable pixels left for this instance
        if np.sum(mask) / area <= overlap_thr:
            continue
        segment_id = id_generator.get_id(ann['label'])
        pan_segm_id[mask] = segment_id
        used_label[mask] = ann["label"]
        used_relation_vals[mask] = ann["relation_val"]
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = ann['label']
        segments_info.append(panoptic_ann)
    # check if instance still exist
    # (later instances may have overwritten an earlier one completely)
    refine_segments_info = []
    for panoptic_ann in segments_info:
        segment_id = panoptic_ann['id']
        area = np.sum(pan_segm_id == segment_id)
        if area > 0:
            refine_segments_info.append(panoptic_ann)
    segments_info = refine_segments_info
    # --- combine semantic area one by one
    for ann in sem_results:
        mask = COCOmask.decode(ann['segmentation']) == 1
        mask_left = np.logical_and(pan_segm_id == 0, mask)
        if mask_left.sum() < stuff_area_limit:
            continue
        segment_id = id_generator.get_id(ann['label'])
        pan_segm_id[mask_left] = segment_id
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = ann['label']
        segments_info.append(panoptic_ann)
    annotation['segments_info'] = segments_info
    Image.fromarray(id2rgb(pan_segm_id)).save(os.path.join(segmentations_folder, annotation['file_name']))
    return annotation
def combine_to_panoptic_ocfusion(cfg, img_info, inst_results, sem_results, id_generator, segmentations_folder):
    """Fuse predictions into a panoptic id map using occlusion queries (OCFusion style).

    Instances are placed best-score first; for each significant overlap
    with an already-placed instance, the candidate's per-instance
    'occlusion_val' entry (indexed by the other instance's 'id',
    presumably 1-based — TODO confirm) decides whether the candidate may
    claim the contested pixels.  Stuff regions fill the remaining
    unlabeled pixels.  Writes the id map PNG and returns a
    COCO-panoptic-style annotation dict.
    """
    overlap_thr = cfg.MODEL.SEMANTIC.OVERLAP_THR
    confidence_thr = cfg.MODEL.SEMANTIC.CONFIDENCE_THR
    stuff_area_limit = cfg.MODEL.SEMANTIC.STUFF_AREA_LIMIT
    # remove instance with score less than confidence_thr
    inst_results = [inst for inst in inst_results if not inst["score"] < confidence_thr]
    # sort instance by score (best first)
    inst_results = sorted(inst_results, key=lambda el: -el['score'])
    # construct panoptic segmentation image; 0 means "unassigned"
    pan_segm_id = np.zeros((img_info['height'], img_info['width']), dtype=np.uint32)
    # per-pixel instance id of the current owner (0 = none)
    used = np.zeros((img_info['height'], img_info['width']), np.int32)
    annotation = {}
    try:
        annotation['image_id'] = int(img_info["id"])
    except Exception:
        annotation['image_id'] = img_info["id"]
    annotation['file_name'] = img_info["file_name"].replace('.jpg', '.png')
    segments_info = []
    # --- combine instance one by one
    for ann in inst_results:
        area = COCOmask.area(ann['segmentation'])
        mask = COCOmask.decode(ann['segmentation']) == 1
        inst_id = ann["id"]
        occlusion_val = ann["occlusion_val"]
        if area == 0:
            continue
        intersect_mask = np.logical_and((used > 0), mask)
        intersect = np.sum(intersect_mask)
        if intersect != 0:
            # start from the uncontested pixels only
            mask = np.logical_and(used == 0, mask)
            intersect_ids = np.unique(used[intersect_mask])
            for intersect_id in intersect_ids:
                id_area = np.sum(used == intersect_id)
                intersect_id_mask = np.logical_and((used == intersect_id), intersect_mask)
                intersect_id_area = np.sum(intersect_id_mask)
                # only consult the occlusion head for significant overlaps
                if intersect_id_area / area >= 0.2 or intersect_id_area / id_area >= 0.2:
                    val = occlusion_val[intersect_id - 1]
                    if val:
                        # occlusion head says this instance is in front:
                        # reclaim the contested pixels
                        mask[intersect_id_mask] = True
        # too little of the instance is left visible
        if np.sum(mask) / area <= overlap_thr:
            continue
        segment_id = id_generator.get_id(ann['label'])
        pan_segm_id[mask] = segment_id
        used[mask] = inst_id
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = ann['label']
        segments_info.append(panoptic_ann)
    # check if instance still exist
    # (a later instance may have reclaimed an earlier one entirely)
    refine_segments_info = []
    for panoptic_ann in segments_info:
        segment_id = panoptic_ann['id']
        area = np.sum(pan_segm_id == segment_id)
        if area > 0:
            refine_segments_info.append(panoptic_ann)
    segments_info = refine_segments_info
    # --- combine semantic area one by one
    for ann in sem_results:
        mask = COCOmask.decode(ann['segmentation']) == 1
        mask_left = np.logical_and(pan_segm_id == 0, mask)
        if mask_left.sum() < stuff_area_limit:
            continue
        segment_id = id_generator.get_id(ann['label'])
        pan_segm_id[mask_left] = segment_id
        panoptic_ann = {}
        panoptic_ann['id'] = segment_id
        panoptic_ann['category_id'] = ann['label']
        segments_info.append(panoptic_ann)
    annotation['segments_info'] = segments_info
    Image.fromarray(id2rgb(pan_segm_id)).save(os.path.join(segmentations_folder, annotation['file_name']))
    return annotation
def combine_to_panoptic_for_acc(cfg, img_info, inst_results, id_generator, target_relations):
    """Run RAP-style combination and score occlusion-ordering accuracy.

    No panoptic image is produced.  For every overlap between a new
    candidate and an already-placed instance whose ground-truth ordering
    appears in ``target_relations`` (a set of (front, back) instance-id
    pairs — presumably; TODO confirm), the counters are updated:

    * ``acc_num``       - pairs ordered correctly by relation value
                          (lower value taken as "in front" — TODO confirm).
    * ``score_acc_num`` - pairs where detection-score order alone is
                          already correct (instances are processed in
                          descending score, so the placed instance is the
                          higher-scoring of the pair).
    * ``infer_count``   - candidates surviving the duplicate check.
    * ``count``         - scored pairs.

    Returns ``(acc_num, score_acc_num, infer_count, count)``.
    """
    overlap_thr = cfg.MODEL.SEMANTIC.OVERLAP_THR
    confidence_thr = cfg.MODEL.SEMANTIC.CONFIDENCE_THR
    stuff_area_limit = cfg.MODEL.SEMANTIC.STUFF_AREA_LIMIT  # unused here; kept for parity with the combine functions
    large_overlap_thr = cfg.MODEL.SEMANTIC.LARGE_OVERLAP_THR
    # remove instance with score less than confidence_thr
    inst_results = [inst for inst in inst_results if not inst["score"] < confidence_thr]
    # sort instance by score (best first)
    inst_results = sorted(inst_results, key=lambda el: -el['score'])
    # construct panoptic segmentation image; 0 means "unassigned"
    pan_segm_id = np.zeros((img_info['height'], img_info['width']), dtype=np.uint32)
    used_label = np.zeros((img_info['height'], img_info['width']), np.int32)
    # per-pixel relation value of the current owner; Inf = unowned
    used_relation_vals = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)
    used_relation_vals[:, :] = float("Inf")
    segment_id_to_instance_id = {}
    segment_id_to_pred_val = {}
    count = 0
    infer_count = 0
    acc_num = 0
    score_acc_num = 0
    # --- combine instance one by one
    for ann in inst_results:
        area = COCOmask.area(ann['segmentation'])
        mask = COCOmask.decode(ann['segmentation']) == 1
        if area == 0:
            continue
        intersect_mask = np.logical_and(pan_segm_id > 0, mask)
        same_label_intersect_mask = np.logical_and(intersect_mask, used_label == ann["label"])
        intersect = np.sum(intersect_mask)
        same_label_intersect = np.sum(same_label_intersect_mask)
        if same_label_intersect / area > overlap_thr:
            continue
        is_valid = True
        if intersect != 0:
            intersect_ids = np.unique(pan_segm_id[intersect_mask])
            for intersect_id in intersect_ids:
                id_area = np.sum(pan_segm_id == intersect_id)
                intersect_id_mask = np.logical_and((pan_segm_id == intersect_id), intersect_mask)
                intersect_id_area = np.sum(intersect_id_mask)
                if intersect_id_area / area >= large_overlap_thr and intersect_id_area / id_area >= large_overlap_thr:
                    is_valid = False
                    break
                # calculate acc against the ground-truth ordering
                instance_id1 = segment_id_to_instance_id[intersect_id]
                instance_id2 = ann["instance_id"]
                pred_val1 = segment_id_to_pred_val[intersect_id]
                pred_val2 = ann["relation_val"]
                if (instance_id1, instance_id2) in target_relations:
                    # the already-placed (higher-scoring) instance is in
                    # front: score ordering is correct by construction
                    if pred_val1 < pred_val2:
                        acc_num += 1
                    score_acc_num += 1
                    count += 1
                elif (instance_id2, instance_id1) in target_relations:
                    if pred_val2 < pred_val1:
                        acc_num += 1
                    count += 1
            mask = np.logical_and(used_relation_vals > ann["relation_val"], mask)
        if is_valid:
            infer_count += 1
        if np.sum(mask) / area <= overlap_thr:
            continue
        if not is_valid:
            continue
        segment_id = id_generator.get_id(ann['label'])
        pan_segm_id[mask] = segment_id
        used_label[mask] = ann["label"]
        used_relation_vals[mask] = ann["relation_val"]
        segment_id_to_instance_id[segment_id] = ann["instance_id"]
        segment_id_to_pred_val[segment_id] = ann["relation_val"]
    return acc_num, score_acc_num, infer_count, count
def combine_to_panoptic_for_ocfusion_acc(cfg, img_info, inst_results, id_generator, target_relations):
    """Run OCFusion-style combination and score occlusion-query accuracy.

    Counterpart of :func:`combine_to_panoptic_for_acc` for the binary
    occlusion head: each scored pair is judged by the candidate's
    'occlusion_val' entry instead of a relation value.  ``acc_num``
    counts pairs the occlusion head orders correctly, ``score_acc_num``
    pairs where score order alone is correct, ``infer_count`` overlaps
    large enough to trigger an occlusion query, and ``count`` the scored
    pairs.  Returns ``(acc_num, score_acc_num, infer_count, count)``.
    """
    overlap_thr = cfg.MODEL.SEMANTIC.OVERLAP_THR
    confidence_thr = cfg.MODEL.SEMANTIC.CONFIDENCE_THR
    stuff_area_limit = cfg.MODEL.SEMANTIC.STUFF_AREA_LIMIT  # unused here; kept for parity with the combine functions
    # remove instance with score less than confidence_thr
    inst_results = [inst for inst in inst_results if not inst["score"] < confidence_thr]
    # sort instance by score (best first)
    inst_results = sorted(inst_results, key=lambda el: -el['score'])
    # construct panoptic segmentation image; 0 means "unassigned"
    pan_segm_id = np.zeros((img_info['height'], img_info['width']), dtype=np.uint32)
    # per-pixel instance id of the current owner (0 = none)
    used = np.zeros((img_info['height'], img_info['width']), np.int32)
    id_to_instance_id = {}
    count = 0
    infer_count = 0
    acc_num = 0
    score_acc_num = 0
    # --- combine instance one by one
    for ann in inst_results:
        area = COCOmask.area(ann['segmentation'])
        mask = COCOmask.decode(ann['segmentation']) == 1
        inst_id = ann["id"]
        occlusion_val = ann["occlusion_val"]
        if area == 0:
            continue
        intersect_mask = np.logical_and((used > 0), mask)
        intersect = np.sum(intersect_mask)
        if intersect != 0:
            mask = np.logical_and(used == 0, mask)
            intersect_ids = np.unique(used[intersect_mask])
            for intersect_id in intersect_ids:
                id_area = np.sum(used == intersect_id)
                intersect_id_mask = np.logical_and((used == intersect_id), intersect_mask)
                intersect_id_area = np.sum(intersect_id_mask)
                val = occlusion_val[intersect_id - 1]
                # calculate acc against the ground-truth ordering
                instance_id1 = id_to_instance_id[intersect_id]
                instance_id2 = ann["instance_id"]
                if (instance_id1, instance_id2) in target_relations:
                    # the already-placed (higher-scoring) instance is in
                    # front: score ordering is correct by construction
                    if not val:
                        acc_num += 1
                    score_acc_num += 1
                    count += 1
                elif (instance_id2, instance_id1) in target_relations:
                    if val:
                        acc_num += 1
                    count += 1
                if intersect_id_area / area >= 0.2 or intersect_id_area / id_area >= 0.2:
                    infer_count += 1
                    if val:
                        mask[intersect_id_mask] = True
        if np.sum(mask) / area <= overlap_thr:
            continue
        segment_id = id_generator.get_id(ann['label'])
        pan_segm_id[mask] = segment_id
        used[mask] = inst_id
        id_to_instance_id[inst_id] = ann["instance_id"]
    return acc_num, score_acc_num, infer_count, count
| 38.297872
| 114
| 0.63284
| 2,084
| 16,200
| 4.610365
| 0.068138
| 0.04465
| 0.028102
| 0.024979
| 0.941923
| 0.926728
| 0.912677
| 0.897794
| 0.894359
| 0.874376
| 0
| 0.008989
| 0.265185
| 16,200
| 422
| 115
| 38.388626
| 0.798135
| 0.058889
| 0
| 0.879365
| 0
| 0
| 0.058561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.025397
| 0
| 0.057143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
163bab7532266529f088a03b992dd01e00d03075
| 19,223
|
py
|
Python
|
tests/molecular/molecules/molecule/fixtures/cof/hexagonal.py
|
andrewtarzia/stk
|
1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a
|
[
"MIT"
] | 21
|
2018-04-12T16:25:24.000Z
|
2022-02-14T23:05:43.000Z
|
tests/molecular/molecules/molecule/fixtures/cof/hexagonal.py
|
JelfsMaterialsGroup/stk
|
0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2
|
[
"MIT"
] | 8
|
2019-03-19T12:36:36.000Z
|
2020-11-11T12:46:00.000Z
|
tests/molecular/molecules/molecule/fixtures/cof/hexagonal.py
|
supramolecular-toolkit/stk
|
0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2
|
[
"MIT"
] | 5
|
2018-08-07T13:00:16.000Z
|
2021-11-01T00:55:10.000Z
|
import pytest
import stk
from ...case_data import CaseData
@pytest.fixture(
scope='session',
params=(
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cof.Hexagonal(
building_blocks={
stk.BuildingBlock(
smiles='BrC1=C(Br)[C+]=N1',
functional_groups=[stk.BromoFactory()],
): (
4, 5, 6, 7, 8, 9, 20, 21, 23, 24, 30, 36,
38, 40, 41, 42, 43, 46, 47, 52, 53, 60, 61,
),
stk.BuildingBlock(
smiles='BrN1N(Br)[C+]=N1',
functional_groups=[stk.BromoFactory()],
): (
10, 11, 12, 13, 14, 15, 22, 25, 26, 27, 28,
29, 37, 39, 44, 45, 54, 55, 56, 57, 58, 59,
31, 62, 63,
),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[N+][C+2]C2(Br)[C+](I)[C+'
'](I)[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (0, 1, 18, 50, 51),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[S][C+2]C2(Br)[C+](I)[C+]'
'(I)[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (2, 16, 34, 49),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[S][O]C2(Br)[C+](I)[C+](I'
')[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (3, 17, 19, 32, 33, 35, 48),
},
lattice_size=(2, 2, 1),
vertex_alignments={0: 5},
),
),
smiles=(
'BrC1=C([C+]2[C+](Br)[C+](I)[C+]3C4=C([C+]=N4)[C+]4[C+'
'](Br)[C+](Br)[C+]5[C+]6N7N=[C+]N7C78OS[C+]7[C+]7C9=C('
'[C+]=N9)[C+]9[C+]([C+](Br)[C+](I)[C+]%10N%11N=[C+]N%1'
'1[C+]%11[C+](Br)[C+](Br)[C+](I)[C+](N%12N=[C+]N%12Br)'
'C%12([C+2][NH2+][C+]%11%12)N%11[C+]=NN%11[C+]%11[C+]('
'I)[C+](C%12=C(Br)[C+]=N%12)C%12%13[C+2][NH2+][C+]%12['
'C+](C%12=C(N=[C+]%12)[C+]7[C+]7[C+]%12[C+]8N8N=[C+]N8'
'[C+]8[C+](C%14=C([C+]=N%14)C6%14OS[C+]4%14)[C+]4[C+]6'
'C%14=C(N=[C+]%14)[C+]%14[C+](C%15=C([C+]=N%15)[C+]2[C'
'+]2SOC23N2N=[C+]N42)[C+]2S[C+2]C2(Br)[C+](N2N=[C+]N2B'
'r)[C+]2[C+]%14N3N=[C+]N3[C+]3[C+]4C%14=C(N=[C+]%14)[C'
'+]%14[C+]([C+](Br)[C+](N%15N=[C+]N%15Br)[C+]%15C%16=C'
'(N=[C+]%16)[C+]%16[C+](Br)[C+]%17[NH2+][C+2]C%17(C%17'
'=C(Br)[C+]=N%17)[C+](N%17N=[C+]N%17Br)[C+](N%17N=[C+]'
'N%17Br)[C+]%16C%16=C([C+]=N%16)[C+]%16[C+](C%17=C(N=['
'C+]%17)C%14%17[C+2][NH2+][C+]%15%17)[C+]%14C%15=C([C+'
']=N%15)C%15(OS[C+]4%15)[C+]4[C+]%15C%17=C(N=[C+]%17)['
'C+]%17[C+]%18C%19=C(N=[C+]%19)[C+]%19[C+]([C+](C%20=C'
'(Br)[C+]=N%20)[C+](C%20=C(Br)[C+]=N%20)[C+](C%20=C([C'
'+]=N%20)[C+]%14[C+](N%14N=[C+]N%14Br)C%14(N%20N=[C+]N'
'%20Br)[C+2]S[C+]%16%14)C%14(OS[C+]%19%14)N%14[C+]=NN4'
'%14)N4N=[C+]N4[C+]4[C+]%14C%16=C(N=[C+]%16)C%16(OS[C+'
']%18%16)[C+]([C+]([C+]%17N%16[C+]=NN%16[C+]8C8([C+2]['
'NH2+][C+]68)N6N=[C+]N6[C+]%153)N3[C+]=NN%123)N3N=[C+]'
'N3[C+]3[C+]([C+](C6=C%13N=[C+]6)[C+](I)[C+](C6=C(Br)['
'C+]=N6)C6(OS[C+]36)C3=C(N=[C+]3)[C+]%14[C+](I)[C+](N3'
'N=[C+]N3Br)C3(N6N=[C+]N6Br)[C+2]S[C+]43)N3[C+]=NN73)N'
'3[C+]=NN23)[C+]%11N2[C+]=NN2C%102[C+2]S[C+]92)N2N=[C+'
']N52)N=[C+]1'
),
name=name,
),
# Non-planar linear BB.
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cof.Hexagonal(
building_blocks=(
stk.BuildingBlock(
smiles='C1(C(C(C(C(C1Br)Br)Br)Br)Br)Br',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles='C1=C(Br)C=C2C=CC(Br)=CC2=C1CCCC',
functional_groups=[stk.BromoFactory()],
),
),
lattice_size=(2, 2, 1),
),
),
smiles=(
'[H]C1=C([H])C2=C([H])C(C3([H])C([H])(Br)C([H])(Br)C(['
'H])(Br)C4([H])C5=C([H])C6=C(C([H])([H])C([H])([H])C(['
'H])([H])C([H])([H])[H])C([H])=C(C([H])=C6C([H])=C5[H]'
')C5([H])C([H])(Br)C([H])(Br)C6([H])C7=C([H])C(C([H])('
'[H])C([H])([H])C([H])([H])C([H])([H])[H])=C8C([H])=C('
'C([H])=C([H])C8=C7[H])C7([H])C([H])(Br)C([H])(Br)C8(['
'H])C9=C([H])C%10=C(C([H])([H])C([H])([H])C([H])([H])C'
'([H])([H])[H])C([H])=C(C([H])=C%10C([H])=C9[H])C9([H]'
')C([H])(Br)C([H])(Br)C([H])(C%10=C([H])C(C([H])([H])C'
'([H])([H])C([H])([H])C([H])([H])[H])=C%11C([H])=C(Br)'
'C([H])=C([H])C%11=C%10[H])C%10([H])C%11=C([H])C%12=C('
'C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])C([H]'
')=C(C([H])=C%12C([H])=C%11[H])C%11([H])C([H])(Br)C([H'
'])(C%12=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H'
'])([H])[H])=C%13C([H])=C(Br)C([H])=C([H])C%13=C%12[H]'
')C%12([H])C%13=C([H])C(C([H])([H])C([H])([H])C([H])(['
'H])C([H])([H])[H])=C%14C([H])=C(C([H])=C([H])C%14=C%1'
'3[H])C%13([H])C([H])(Br)C([H])(C%14=C([H])C(C([H])([H'
'])C([H])([H])C([H])([H])C([H])([H])[H])=C%15C([H])=C('
'Br)C([H])=C([H])C%15=C%14[H])C%14([H])C%15=C([H])C%16'
'=C(C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])C('
'[H])=C(C([H])=C%16C([H])=C%15[H])C%15([H])C([H])(Br)C'
'([H])(C%16=C([H])C(C([H])([H])C([H])([H])C([H])([H])C'
'([H])([H])[H])=C%17C([H])=C(Br)C([H])=C([H])C%17=C%16'
'[H])C([H])(C%16=C([H])C(C([H])([H])C([H])([H])C([H])('
'[H])C([H])([H])[H])=C%17C([H])=C(Br)C([H])=C([H])C%17'
'=C%16[H])C([H])(C%16=C([H])C(C([H])([H])C([H])([H])C('
'[H])([H])C([H])([H])[H])=C%17C([H])=C(Br)C([H])=C([H]'
')C%17=C%16[H])C%15([H])C%15=C([H])C(C([H])([H])C([H])'
'([H])C([H])([H])C([H])([H])[H])=C%16C([H])=C(C([H])=C'
'([H])C%16=C%15[H])C%15([H])C%16([H])C%17=C([H])C%18=C'
'(C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])C([H'
'])=C(C([H])=C%18C([H])=C%17[H])C%14([H])C%13([H])C%13'
'=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H])([H])'
'[H])=C%14C([H])=C(C([H])=C([H])C%14=C%13[H])C%13([H])'
'C%14([H])C%17=C([H])C%18=C(C([H])([H])C([H])([H])C([H'
'])([H])C([H])([H])[H])C([H])=C(C([H])=C%18C([H])=C%17'
'[H])C%12([H])C%11([H])C%11=C([H])C(C([H])([H])C([H])('
'[H])C([H])([H])C([H])([H])[H])=C%12C([H])=C(C([H])=C('
'[H])C%12=C%11[H])C%11([H])C([H])(C%12=C([H])C%17=C(C('
'[H])([H])C([H])([H])C([H])([H])C([H])([H])[H])C([H])='
'C(C([H])=C%17C([H])=C%12[H])C9%10[H])C9([H])C%10=C([H'
'])C(C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])='
'C%12C([H])=C(C([H])=C([H])C%12=C%10[H])C8([H])C7([H])'
'C7=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H])([H'
'])[H])=C8C([H])=C(C([H])=C([H])C8=C7[H])C7([H])C8([H]'
')C%10=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H])'
'([H])[H])=C%12C([H])=C(C([H])=C([H])C%12=C%10[H])C6(['
'H])C5([H])C5=C([H])C(C([H])([H])C([H])([H])C([H])([H]'
')C([H])([H])[H])=C6C([H])=C(C([H])=C([H])C6=C5[H])C5('
'[H])C([H])(C6=C([H])C(C([H])([H])C([H])([H])C([H])([H'
'])C([H])([H])[H])=C%10C([H])=C(C([H])=C([H])C%10=C6[H'
'])C43[H])C([H])(Br)C([H])(C3=C([H])C(C([H])([H])C([H]'
')([H])C([H])([H])C([H])([H])[H])=C4C([H])=C(Br)C([H])'
'=C([H])C4=C3[H])C3([H])C4=C([H])C(C([H])([H])C([H])(['
'H])C([H])([H])C([H])([H])[H])=C6C([H])=C(C([H])=C([H]'
')C6=C4[H])C4([H])C([H])(Br)C([H])(C6=C([H])C(C([H])(['
'H])C([H])([H])C([H])([H])C([H])([H])[H])=C%10C([H])=C'
'(Br)C([H])=C([H])C%10=C6[H])C6([H])C%10=C([H])C%12=C('
'C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])C([H]'
')=C(C([H])=C%12C([H])=C%10[H])C%10([H])C([H])(Br)C([H'
'])(C%12=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H'
'])([H])[H])=C%17C([H])=C(Br)C([H])=C([H])C%17=C%12[H]'
')C([H])(C%12=C([H])C(C([H])([H])C([H])([H])C([H])([H]'
')C([H])([H])[H])=C%17C([H])=C(Br)C([H])=C([H])C%17=C%'
'12[H])C%12([H])C%17=C([H])C%18=C(C([H])([H])C([H])([H'
'])C([H])([H])C([H])([H])[H])C([H])=C(C([H])=C%18C([H]'
')=C%17[H])C%17([H])C%18([H])C%19=C([H])C(C([H])([H])C'
'([H])([H])C([H])([H])C([H])([H])[H])=C%20C([H])=C(C(['
'H])=C([H])C%20=C%19[H])C%19([H])C%20([H])C%21=C([H])C'
'(C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])=C%2'
'2C([H])=C(C([H])=C([H])C%22=C%21[H])C([H])(C%14([H])C'
'%14=C([H])C%21=C(C([H])([H])C([H])([H])C([H])([H])C(['
'H])([H])[H])C([H])=C(C([H])=C%21C([H])=C%14[H])C%11(['
'H])C%11([H])C%14=C([H])C(C([H])([H])C([H])([H])C([H])'
'([H])C([H])([H])[H])=C%21C([H])=C(C([H])=C([H])C%21=C'
'%14[H])C%20([H])C%14([H])C%20=C([H])C%21=C(C([H])([H]'
')C([H])([H])C([H])([H])C([H])([H])[H])C([H])=C(C([H])'
'=C%21C([H])=C%20[H])C([H])(C7([H])C7=C([H])C(C([H])(['
'H])C([H])([H])C([H])([H])C([H])([H])[H])=C%20C([H])=C'
'(C([H])=C([H])C%20=C7[H])C9%11[H])C([H])(C7=C([H])C(C'
'([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])=C9C(['
'H])=C(C([H])=C([H])C9=C7[H])C4([H])C6([H])C4=C([H])C6'
'=C(C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])C('
'[H])=C(C([H])=C6C([H])=C4[H])C%14([H])C%19([H])C4=C(['
'H])C(C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])'
'=C6C([H])=C(C([H])=C([H])C6=C4[H])C%10%12[H])C8([H])C'
'4=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H])([H]'
')[H])=C6C([H])=C(C([H])=C([H])C6=C4[H])C53[H])C3([H])'
'C4=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H])([H'
'])[H])=C5C([H])=C(C([H])=C([H])C5=C4[H])C%18([H])C([H'
'])(C4=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H])'
'([H])[H])=C5C([H])=C(C([H])=C([H])C5=C4[H])C([H])(C%1'
'6([H])C4=C([H])C(C([H])([H])C([H])([H])C([H])([H])C(['
'H])([H])[H])=C5C([H])=C(C([H])=C([H])C5=C4[H])C%133[H'
'])C([H])(C3=C([H])C(C([H])([H])C([H])([H])C([H])([H])'
'C([H])([H])[H])=C4C([H])=C(Br)C([H])=C([H])C4=C3[H])C'
'%15([H])C3=C([H])C(C([H])([H])C([H])([H])C([H])([H])C'
'([H])([H])[H])=C4C([H])=C(Br)C([H])=C([H])C4=C3[H])C('
'[H])(C3=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H'
'])([H])[H])=C4C([H])=C(Br)C([H])=C([H])C4=C3[H])C%17('
'[H])C3=C([H])C(C([H])([H])C([H])([H])C([H])([H])C([H]'
')([H])[H])=C4C([H])=C(Br)C([H])=C([H])C4=C3[H])=C([H]'
')C(C([H])([H])C([H])([H])C([H])([H])C([H])([H])[H])=C'
'2C([H])=C1Br'
),
name=name,
),
# One placer atom linear BB.
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cof.Hexagonal(
building_blocks=(
stk.BuildingBlock(
smiles='C1(C(C(C(C(C1Br)Br)Br)Br)Br)Br',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles='C(Br)Br',
functional_groups=[stk.BromoFactory()],
),
),
lattice_size=(2, 2, 1),
),
),
smiles=(
'[H]C([H])(Br)C1([H])C([H])(Br)C2([H])C([H])([H])C3([H'
'])C([H])(C([H])([H])Br)C([H])(Br)C4([H])C([H])([H])C5'
'([H])C([H])(C([H])([H])Br)C([H])(Br)C6([H])C([H])([H]'
')C7([H])C([H])(C([H])([H])Br)C([H])(Br)C([H])(Br)C8(['
'H])C([H])([H])C9([H])C([H])(Br)C([H])(Br)C%10([H])C(['
'H])([H])C%11([H])C([H])(Br)C([H])(Br)C%12([H])C([H])('
'[H])C%13([H])C([H])(Br)C([H])(Br)C([H])(Br)C([H])(C(['
'H])([H])Br)C%13([H])C([H])([H])C%13([H])C([H])(Br)C(['
'H])(C([H])([H])Br)C%14([H])C([H])([H])C%15([H])C([H])'
'(Br)C([H])(C([H])([H])Br)C%16([H])C([H])([H])C%17([H]'
')C([H])(Br)C([H])(C([H])([H])Br)C([H])(C([H])([H])Br)'
'C%18([H])C([H])([H])C%19([H])C([H])(C([H])([H])Br)C(['
'H])(C([H])([H])Br)C%20([H])C([H])([H])C%21([H])C([H])'
'(C([H])([H])Br)C([H])(C([H])([H])Br)C([H])(C([H])([H]'
')C2([H])C([H])(C([H])([H])Br)C1([H])C([H])([H])Br)C1('
'[H])C([H])([H])C3([H])C4([H])C([H])([H])C2([H])C3([H]'
')C([H])([H])C5([H])C6([H])C([H])([H])C4([H])C([H])(C('
'[H])([H])C87[H])C5([H])C([H])([H])C9([H])C%10([H])C(['
'H])([H])C6([H])C7([H])C([H])([H])C%11([H])C%12([H])C('
'[H])([H])C%13([H])C%14([H])C([H])([H])C7([H])C7([H])C'
'([H])([H])C%15([H])C%16([H])C([H])([H])C8([H])C([H])('
'C([H])([H])C%17%18[H])C9([H])C([H])([H])C%19([H])C%20'
'([H])C([H])([H])C([H])(C2([H])C([H])([H])C%211[H])C1('
'[H])C([H])([H])C9([H])C2([H])C([H])([H])C([H])(C5([H]'
')C([H])([H])C6([H])C7([H])C([H])([H])C82[H])C4([H])C('
'[H])([H])C31[H]'
),
name=name,
),
# Two placer atom linear BB.
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cof.Hexagonal(
building_blocks=(
stk.BuildingBlock(
smiles='C1(C(C(C(C(C1Br)Br)Br)Br)Br)Br',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
),
),
lattice_size=(2, 2, 1),
),
),
smiles=(
'[H]C([H])(Br)C([H])([H])C1([H])C([H])(Br)C([H])(Br)C('
'[H])(Br)C2([H])C([H])([H])C([H])([H])C3([H])C([H])(Br'
')C([H])(Br)C4([H])C([H])([H])C([H])([H])C5([H])C([H])'
'(Br)C([H])(Br)C6([H])C([H])([H])C([H])([H])C7([H])C(['
'H])(Br)C([H])(Br)C([H])(C([H])([H])C([H])([H])Br)C8(['
'H])C([H])([H])C([H])([H])C9([H])C([H])(Br)C([H])(C([H'
'])([H])C([H])([H])Br)C%10([H])C([H])([H])C([H])([H])C'
'%11([H])C([H])(Br)C([H])(C([H])([H])C([H])([H])Br)C%1'
'2([H])C([H])([H])C([H])([H])C%13([H])C([H])(Br)C([H])'
'(C([H])([H])C([H])([H])Br)C([H])(C([H])([H])C([H])([H'
'])Br)C([H])(C([H])([H])C([H])([H])Br)C%13([H])C([H])('
'[H])C([H])([H])C%13([H])C([H])(C([H])([H])C([H])([H])'
'Br)C([H])(C([H])([H])C([H])([H])Br)C%14([H])C([H])([H'
'])C([H])([H])C%15([H])C([H])(C([H])([H])C([H])([H])Br'
')C([H])(C([H])([H])C([H])([H])Br)C%16([H])C([H])([H])'
'C([H])([H])C%17([H])C([H])(C([H])([H])C([H])([H])Br)C'
'([H])(C([H])([H])C([H])([H])Br)C([H])(Br)C%18([H])C(['
'H])([H])C([H])([H])C%19([H])C([H])(C([H])([H])C([H])('
'[H])Br)C([H])(Br)C%20([H])C([H])([H])C([H])([H])C%21('
'[H])C([H])(C([H])([H])C([H])([H])Br)C([H])(Br)C([H])('
'C([H])([H])C([H])([H])C12[H])C1([H])C([H])([H])C([H])'
'([H])C3([H])C4([H])C([H])([H])C([H])([H])C2([H])C3([H'
'])C([H])([H])C([H])([H])C5([H])C6([H])C([H])([H])C([H'
'])([H])C4([H])C([H])(C([H])([H])C([H])([H])C78[H])C5('
'[H])C([H])([H])C([H])([H])C9([H])C%10([H])C([H])([H])'
'C([H])([H])C6([H])C7([H])C([H])([H])C([H])([H])C%11(['
'H])C%12([H])C([H])([H])C([H])([H])C%13([H])C%14([H])C'
'([H])([H])C([H])([H])C7([H])C7([H])C([H])([H])C([H])('
'[H])C%15([H])C%16([H])C([H])([H])C([H])([H])C8([H])C('
'[H])(C([H])([H])C([H])([H])C%18%17[H])C9([H])C([H])(['
'H])C([H])([H])C%19([H])C%20([H])C([H])([H])C([H])([H]'
')C([H])(C2([H])C([H])([H])C([H])([H])C%211[H])C1([H])'
'C([H])([H])C([H])([H])C9([H])C2([H])C([H])([H])C([H])'
'([H])C([H])(C4([H])C([H])([H])C([H])([H])C31[H])C5([H'
'])C([H])([H])C([H])([H])C6([H])C7([H])C([H])([H])C([H'
'])([H])C82[H]'
),
name=name,
),
),
)
def cof_hexagonal(request) -> CaseData:
    """Fixture yielding a hexagonal-COF test case.

    Builds the case name from the fixture name and the parametrize index,
    then delegates construction to the current parameter factory.
    """
    case_name = f'{request.fixturename}{request.param_index}'
    return request.param(case_name)
| 57.21131
| 71
| 0.288821
| 3,441
| 19,223
| 1.605638
| 0.062482
| 0.232398
| 0.211765
| 0.206335
| 0.805611
| 0.774118
| 0.748778
| 0.716923
| 0.687602
| 0.660271
| 0
| 0.082921
| 0.327472
| 19,223
| 335
| 72
| 57.38209
| 0.344446
| 0.003902
| 0
| 0.322188
| 0
| 0.601824
| 0.554116
| 0.545863
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00304
| false
| 0
| 0.009119
| 0.00304
| 0.015198
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
165ec793228e3706f5d2ae6985104660e7099965
| 49
|
py
|
Python
|
TrainingRestnet18withTinyImagenetDataset/poc/__init__.py
|
csharpshooter/DeepLearning
|
c1d20660c32076468970f7376931e1fcd0d2644e
|
[
"MIT"
] | null | null | null |
TrainingRestnet18withTinyImagenetDataset/poc/__init__.py
|
csharpshooter/DeepLearning
|
c1d20660c32076468970f7376931e1fcd0d2644e
|
[
"MIT"
] | null | null | null |
TrainingRestnet18withTinyImagenetDataset/poc/__init__.py
|
csharpshooter/DeepLearning
|
c1d20660c32076468970f7376931e1fcd0d2644e
|
[
"MIT"
] | null | null | null |
from .gradcamexperiment import GradcamExperiment
| 24.5
| 48
| 0.897959
| 4
| 49
| 11
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.977778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1697a017dd9e7919e85fc241ac948825642e4755
| 9,471
|
py
|
Python
|
pepper_tools/laser/getlaser.py
|
pet1330/spqrel_tools
|
e8e3b771680acf1f60260d1c4ced757a78d648b5
|
[
"MIT"
] | null | null | null |
pepper_tools/laser/getlaser.py
|
pet1330/spqrel_tools
|
e8e3b771680acf1f60260d1c4ced757a78d648b5
|
[
"MIT"
] | null | null | null |
pepper_tools/laser/getlaser.py
|
pet1330/spqrel_tools
|
e8e3b771680acf1f60260d1c4ced757a78d648b5
|
[
"MIT"
] | null | null | null |
#http://doc.aldebaran.com/2-5/naoqi/core/almemory-api.html
#http://doc.aldebaran.com/2-5/family/pepper_technical/pepper_dcm/actuator_sensor_names.html#ju-lasers
import qi
import argparse
import sys
import time
import threading
import os
# ALMemory keys for all of Pepper's base laser segment readings, ordered
# RIGHT, FRONT, LEFT; per laser Seg01..Seg15, each with an X and a Y value
# (90 keys total, same contents and order as the original hand-written list).
laserValueList = [
    "Device/SubDeviceList/Platform/LaserSensor/%s/Horizontal/"
    "Seg%02d/%s/Sensor/Value" % (side, seg, axis)
    for side in ("Right", "Front", "Left")
    for seg in range(1, 16)
    for axis in ("X", "Y")
]
import threading
def rhMonitorThread(memory_service):
    """Poll ALMemory laser values until the thread's do_run flag is cleared.

    :param memory_service: ALMemory service proxy providing getListData().

    Runs at ~10 Hz while ``do_run`` on the current thread is truthy (the
    attribute is set to False by main() to request shutdown).  Printing was
    the Python-2-only ``print a, b`` statement; a single formatted string is
    used instead so the output is identical on Python 2 and Python 3.
    """
    t = threading.currentThread()
    while getattr(t, "do_run", True):
        laserValues = memory_service.getListData(laserValueList)
        # Indices 44/45 are the front laser's Seg08 X/Y readings.
        print("%s %s" % (laserValues[44], laserValues[45]))
        time.sleep(0.1)
    print("Exiting Thread")
def main():
    """Connect to the robot's NAOqi session and stream laser readings.

    Command-line arguments:
      --pip   robot IP address (defaults to the PEPPER_IP environment
              variable)
      --pport NAOqi port number (default 9559)
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): raises KeyError at startup when PEPPER_IP is unset —
    # consider os.environ.get('PEPPER_IP', '127.0.0.1') if that is unwanted.
    parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'],
                        help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--pport", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    pip = args.pip
    pport = args.pport
    # Starting application
    try:
        connection_url = "tcp://" + pip + ":" + str(pport)
        app = qi.Application(["laserReader", "--qi-url=" + connection_url])
    except RuntimeError:
        print("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) + ".\n"
              "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    app.start()
    session = app.session
    # Starting services
    memory_service = session.service("ALMemory")
    # Create a thread that monitors the laser signal directly.
    monitorThread = threading.Thread(target=rhMonitorThread,
                                     args=(memory_service,))
    monitorThread.start()
    # Program stays at this point until we stop it
    app.run()
    # Ask the monitor loop to exit, then report completion (print() form is
    # valid on both Python 2 and 3).
    monitorThread.do_run = False
    print("Finished")
if __name__ == "__main__":
    main()
| 60.324841
| 101
| 0.788829
| 1,129
| 9,471
| 6.596103
| 0.134632
| 0.229623
| 0.326306
| 0.459245
| 0.849604
| 0.842755
| 0.837116
| 0.819927
| 0.819927
| 0
| 0
| 0.022909
| 0.073593
| 9,471
| 156
| 102
| 60.711538
| 0.825849
| 0.033787
| 0
| 0.015152
| 0
| 0
| 0.805601
| 0.774532
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.05303
| null | null | 0.030303
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
16a3376e9b268d8a3291ecc0ea4e10ac21b476e6
| 156,887
|
py
|
Python
|
src/plugins/nat/test/test_nat44_ei.py
|
aliireza/vpp
|
bf93c6e9bf340e323157ca2b4ffa8e83a36e5108
|
[
"Apache-2.0"
] | 1
|
2021-08-11T07:23:32.000Z
|
2021-08-11T07:23:32.000Z
|
src/plugins/nat/test/test_nat44_ei.py
|
aliireza/vpp
|
bf93c6e9bf340e323157ca2b4ffa8e83a36e5108
|
[
"Apache-2.0"
] | null | null | null |
src/plugins/nat/test/test_nat44_ei.py
|
aliireza/vpp
|
bf93c6e9bf340e323157ca2b4ffa8e83a36e5108
|
[
"Apache-2.0"
] | 1
|
2020-03-19T05:02:29.000Z
|
2020-03-19T05:02:29.000Z
|
#!/usr/bin/env python3
import ipaddress
import random
import socket
import struct
import unittest
from io import BytesIO
from time import sleep
import scapy.compat
from framework import VppTestCase, VppTestRunner
from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
from scapy.all import bind_layers, Packet, ByteEnumField, ShortField, \
IPField, IntField, LongField, XByteField, FlagsField, FieldLenField, \
PacketListField
from scapy.data import IP_PROTOS
from scapy.layers.inet import IP, TCP, UDP, ICMP
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest, ICMPv6EchoReply
from scapy.layers.l2 import Ether, ARP, GRE
from scapy.packet import Raw
from syslog_rfc5424_parser import SyslogMessage, ParseError
from syslog_rfc5424_parser.constants import SyslogSeverity
from util import ppp
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_neighbor import VppNeighbor
from vpp_papi import VppEnum
# NAT HA protocol event data
class Event(Packet):
    """One NAT HA state-sync event record (add/del/refresh of a session),
    carrying the session addresses, ports and traffic counters."""
    name = "Event"
    fields_desc = [ByteEnumField("event_type", None,
                                 {1: "add", 2: "del", 3: "refresh"}),
                   ByteEnumField("protocol", None,
                                 {0: "other", 1: "udp", 2: "tcp", 3: "icmp"}),
                   ShortField("flags", 0),
                   IPField("in_addr", None),
                   IPField("out_addr", None),
                   ShortField("in_port", None),
                   ShortField("out_port", None),
                   IPField("eh_addr", None),
                   IPField("ehn_addr", None),
                   ShortField("eh_port", None),
                   ShortField("ehn_port", None),
                   IntField("fib_index", None),
                   IntField("total_pkts", 0),
                   LongField("total_bytes", 0)]
    def extract_padding(self, s):
        # Events are packed back to back inside HANATStateSync; returning
        # the remainder as padding lets PacketListField dissect the next
        # event instead of treating it as this event's payload.
        return "", s
# NAT HA protocol header
class HANATStateSync(Packet):
    """NAT HA state synchronization protocol header; wraps a counted list
    of Event records."""
    name = "HA NAT state sync"
    fields_desc = [XByteField("version", 1),
                   FlagsField("flags", 0, 8, ['ACK']),
                   # "count" is derived from the events list when building.
                   FieldLenField("count", None, count_of="events"),
                   IntField("sequence_number", 1),
                   IntField("thread_index", 0),
                   PacketListField("events", [], Event,
                                   count_from=lambda pkt: pkt.count)]
class MethodHolder(VppTestCase):
""" NAT create capture and verify method holder """
@property
def config_flags(self):
return VppEnum.vl_api_nat_config_flags_t
@property
def nat44_config_flags(self):
return VppEnum.vl_api_nat44_config_flags_t
@property
def SYSLOG_SEVERITY(self):
return VppEnum.vl_api_syslog_severity_t
def nat44_add_static_mapping(self, local_ip, external_ip='0.0.0.0',
local_port=0, external_port=0, vrf_id=0,
is_add=1, external_sw_if_index=0xFFFFFFFF,
proto=0, tag="", flags=0):
"""
Add/delete NAT44EI static mapping
:param local_ip: Local IP address
:param external_ip: External IP address
:param local_port: Local port number (Optional)
:param external_port: External port number (Optional)
:param vrf_id: VRF ID (Default 0)
:param is_add: 1 if add, 0 if delete (Default add)
:param external_sw_if_index: External interface instead of IP address
:param proto: IP protocol (Mandatory if port specified)
:param tag: Opaque string tag
:param flags: NAT configuration flags
"""
if not (local_port and external_port):
flags |= self.config_flags.NAT_IS_ADDR_ONLY
self.vapi.nat44_add_del_static_mapping(
is_add=is_add,
local_ip_address=local_ip,
external_ip_address=external_ip,
external_sw_if_index=external_sw_if_index,
local_port=local_port,
external_port=external_port,
vrf_id=vrf_id, protocol=proto,
flags=flags,
tag=tag)
def nat44_add_address(self, ip, is_add=1, vrf_id=0xFFFFFFFF, twice_nat=0):
"""
Add/delete NAT44EI address
:param ip: IP address
:param is_add: 1 if add, 0 if delete (Default add)
:param twice_nat: twice NAT address for external hosts
"""
flags = self.config_flags.NAT_IS_TWICE_NAT if twice_nat else 0
self.vapi.nat44_add_del_address_range(first_ip_address=ip,
last_ip_address=ip,
vrf_id=vrf_id,
is_add=is_add,
flags=flags)
def create_routes_and_neigbors(self):
r1 = VppIpRoute(self, self.pg7.remote_ip4, 32,
[VppRoutePath(self.pg7.remote_ip4,
self.pg7.sw_if_index)])
r2 = VppIpRoute(self, self.pg8.remote_ip4, 32,
[VppRoutePath(self.pg8.remote_ip4,
self.pg8.sw_if_index)])
r1.add_vpp_config()
r2.add_vpp_config()
n1 = VppNeighbor(self,
self.pg7.sw_if_index,
self.pg7.remote_mac,
self.pg7.remote_ip4,
is_static=1)
n2 = VppNeighbor(self,
self.pg8.sw_if_index,
self.pg8.remote_mac,
self.pg8.remote_ip4,
is_static=1)
n1.add_vpp_config()
n2.add_vpp_config()
def create_stream_in(self, in_if, out_if, dst_ip=None, ttl=64):
"""
Create packet stream for inside network
:param in_if: Inside interface
:param out_if: Outside interface
:param dst_ip: Destination address
:param ttl: TTL of generated packets
"""
if dst_ip is None:
dst_ip = out_if.remote_ip4
pkts = []
# TCP
p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
TCP(sport=self.tcp_port_in, dport=20))
pkts.extend([p, p])
# UDP
p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
UDP(sport=self.udp_port_in, dport=20))
pkts.append(p)
# ICMP
p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
ICMP(id=self.icmp_id_in, type='echo-request'))
pkts.append(p)
return pkts
def compose_ip6(self, ip4, pref, plen):
"""
Compose IPv4-embedded IPv6 addresses
:param ip4: IPv4 address
:param pref: IPv6 prefix
:param plen: IPv6 prefix length
:returns: IPv4-embedded IPv6 addresses
"""
pref_n = list(socket.inet_pton(socket.AF_INET6, pref))
ip4_n = list(socket.inet_pton(socket.AF_INET, ip4))
if plen == 32:
pref_n[4] = ip4_n[0]
pref_n[5] = ip4_n[1]
pref_n[6] = ip4_n[2]
pref_n[7] = ip4_n[3]
elif plen == 40:
pref_n[5] = ip4_n[0]
pref_n[6] = ip4_n[1]
pref_n[7] = ip4_n[2]
pref_n[9] = ip4_n[3]
elif plen == 48:
pref_n[6] = ip4_n[0]
pref_n[7] = ip4_n[1]
pref_n[9] = ip4_n[2]
pref_n[10] = ip4_n[3]
elif plen == 56:
pref_n[7] = ip4_n[0]
pref_n[9] = ip4_n[1]
pref_n[10] = ip4_n[2]
pref_n[11] = ip4_n[3]
elif plen == 64:
pref_n[9] = ip4_n[0]
pref_n[10] = ip4_n[1]
pref_n[11] = ip4_n[2]
pref_n[12] = ip4_n[3]
elif plen == 96:
pref_n[12] = ip4_n[0]
pref_n[13] = ip4_n[1]
pref_n[14] = ip4_n[2]
pref_n[15] = ip4_n[3]
packed_pref_n = b''.join([scapy.compat.chb(x) for x in pref_n])
return socket.inet_ntop(socket.AF_INET6, packed_pref_n)
def create_stream_out(self, out_if, dst_ip=None, ttl=64,
use_inside_ports=False):
"""
Create packet stream for outside network
:param out_if: Outside interface
:param dst_ip: Destination IP address (Default use global NAT address)
:param ttl: TTL of generated packets
:param use_inside_ports: Use inside NAT ports as destination ports
instead of outside ports
"""
if dst_ip is None:
dst_ip = self.nat_addr
if not use_inside_ports:
tcp_port = self.tcp_port_out
udp_port = self.udp_port_out
icmp_id = self.icmp_id_out
else:
tcp_port = self.tcp_port_in
udp_port = self.udp_port_in
icmp_id = self.icmp_id_in
pkts = []
# TCP
p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
TCP(dport=tcp_port, sport=20))
pkts.extend([p, p])
# UDP
p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
UDP(dport=udp_port, sport=20))
pkts.append(p)
# ICMP
p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
ICMP(id=icmp_id, type='echo-reply'))
pkts.append(p)
return pkts
def create_stream_out_ip6(self, out_if, src_ip, dst_ip, hl=64):
"""
Create packet stream for outside network
:param out_if: Outside interface
:param dst_ip: Destination IP address (Default use global NAT address)
:param hl: HL of generated packets
"""
pkts = []
# TCP
p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
IPv6(src=src_ip, dst=dst_ip, hlim=hl) /
TCP(dport=self.tcp_port_out, sport=20))
pkts.append(p)
# UDP
p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
IPv6(src=src_ip, dst=dst_ip, hlim=hl) /
UDP(dport=self.udp_port_out, sport=20))
pkts.append(p)
# ICMP
p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
IPv6(src=src_ip, dst=dst_ip, hlim=hl) /
ICMPv6EchoReply(id=self.icmp_id_out))
pkts.append(p)
return pkts
    def verify_capture_out(self, capture, nat_ip=None, same_port=False,
                           dst_ip=None, is_ip6=False, ignore_port=False):
        """
        Verify captured packets on outside network

        :param capture: Captured packets
        :param nat_ip: Translated IP address (Default use global NAT address)
        :param same_port: Source port number is not translated (Default False)
        :param dst_ip: Destination IP address (Default do not verify)
        :param is_ip6: If L3 protocol is IPv6 (Default False)
        :param ignore_port: Skip source-port/ICMP-id checks (Default False)
        """
        # Bind the layer classes once so the per-packet checks below are
        # identical for the IPv4 and IPv6 cases.
        if is_ip6:
            IP46 = IPv6
            ICMP46 = ICMPv6EchoRequest
        else:
            IP46 = IP
            ICMP46 = ICMP
        if nat_ip is None:
            nat_ip = self.nat_addr
        for packet in capture:
            try:
                # Checksums are only validated in the IPv4 case here.
                if not is_ip6:
                    self.assert_packet_checksums_valid(packet)
                self.assertEqual(packet[IP46].src, nat_ip)
                if dst_ip is not None:
                    self.assertEqual(packet[IP46].dst, dst_ip)
                if packet.haslayer(TCP):
                    if not ignore_port:
                        if same_port:
                            self.assertEqual(
                                packet[TCP].sport, self.tcp_port_in)
                        else:
                            self.assertNotEqual(
                                packet[TCP].sport, self.tcp_port_in)
                    # Record the translated port for later out2in checks.
                    self.tcp_port_out = packet[TCP].sport
                    self.assert_packet_checksums_valid(packet)
                elif packet.haslayer(UDP):
                    if not ignore_port:
                        if same_port:
                            self.assertEqual(
                                packet[UDP].sport, self.udp_port_in)
                        else:
                            self.assertNotEqual(
                                packet[UDP].sport, self.udp_port_in)
                    # Record the translated port for later out2in checks.
                    self.udp_port_out = packet[UDP].sport
                else:
                    if not ignore_port:
                        if same_port:
                            self.assertEqual(
                                packet[ICMP46].id, self.icmp_id_in)
                        else:
                            self.assertNotEqual(
                                packet[ICMP46].id, self.icmp_id_in)
                    # Record the translated ICMP id for later checks.
                    self.icmp_id_out = packet[ICMP46].id
                    self.assert_packet_checksums_valid(packet)
            # NOTE(review): bare except (logs then re-raises) also catches
            # BaseException such as KeyboardInterrupt.
            except:
                self.logger.error(ppp("Unexpected or invalid packet "
                                      "(outside network):", packet))
                raise
def verify_capture_out_ip6(self, capture, nat_ip, same_port=False,
dst_ip=None):
"""
Verify captured packets on outside network
:param capture: Captured packets
:param nat_ip: Translated IP address
:param same_port: Source port number is not translated (Default False)
:param dst_ip: Destination IP address (Default do not verify)
"""
return self.verify_capture_out(capture, nat_ip, same_port, dst_ip,
True)
    def verify_capture_in(self, capture, in_if):
        """
        Verify captured packets on inside network

        :param capture: Captured packets
        :param in_if: Inside interface
        """
        for packet in capture:
            try:
                self.assert_packet_checksums_valid(packet)
                # Destination must be translated back to the inside host.
                self.assertEqual(packet[IP].dst, in_if.remote_ip4)
                if packet.haslayer(TCP):
                    self.assertEqual(packet[TCP].dport, self.tcp_port_in)
                elif packet.haslayer(UDP):
                    self.assertEqual(packet[UDP].dport, self.udp_port_in)
                else:
                    # Anything else is expected to be the ICMP echo packet.
                    self.assertEqual(packet[ICMP].id, self.icmp_id_in)
            # NOTE(review): bare except (logs then re-raises).
            except:
                self.logger.error(ppp("Unexpected or invalid packet "
                                      "(inside network):", packet))
                raise
    def verify_capture_no_translation(self, capture, ingress_if, egress_if):
        """
        Verify captured packet that don't have to be translated

        :param capture: Captured packets
        :param ingress_if: Ingress interface
        :param egress_if: Egress interface
        """
        for packet in capture:
            try:
                # Addresses must be the untouched end-host addresses.
                self.assertEqual(packet[IP].src, ingress_if.remote_ip4)
                self.assertEqual(packet[IP].dst, egress_if.remote_ip4)
                if packet.haslayer(TCP):
                    # Source port must still be the original inside port.
                    self.assertEqual(packet[TCP].sport, self.tcp_port_in)
                elif packet.haslayer(UDP):
                    self.assertEqual(packet[UDP].sport, self.udp_port_in)
                else:
                    self.assertEqual(packet[ICMP].id, self.icmp_id_in)
            # NOTE(review): bare except (logs then re-raises).
            except:
                self.logger.error(ppp("Unexpected or invalid packet "
                                      "(inside network):", packet))
                raise
    def verify_capture_out_with_icmp_errors(self, capture, src_ip=None,
                                            icmp_type=11):
        """
        Verify captured packets with ICMP errors on outside network

        :param capture: Captured packets
        :param src_ip: Translated IP address or IP address of VPP
            (Default use global NAT address)
        :param icmp_type: Type of error ICMP packet
            we are expecting (Default 11)
        """
        if src_ip is None:
            src_ip = self.nat_addr
        for packet in capture:
            try:
                self.assertEqual(packet[IP].src, src_ip)
                # Each packet must carry an ICMP layer of the given type.
                self.assertEqual(packet.haslayer(ICMP), 1)
                icmp = packet[ICMP]
                self.assertEqual(icmp.type, icmp_type)
                # The error must embed the original (inner) IP packet.
                self.assertTrue(icmp.haslayer(IPerror))
                inner_ip = icmp[IPerror]
                if inner_ip.haslayer(TCPerror):
                    # Inner destination port must be the translated port.
                    self.assertEqual(inner_ip[TCPerror].dport,
                                     self.tcp_port_out)
                elif inner_ip.haslayer(UDPerror):
                    self.assertEqual(inner_ip[UDPerror].dport,
                                     self.udp_port_out)
                else:
                    self.assertEqual(inner_ip[ICMPerror].id, self.icmp_id_out)
            # NOTE(review): bare except (logs then re-raises).
            except:
                self.logger.error(ppp("Unexpected or invalid packet "
                                      "(outside network):", packet))
                raise
    def verify_capture_in_with_icmp_errors(self, capture, in_if, icmp_type=11):
        """
        Verify captured packets with ICMP errors on inside network

        :param capture: Captured packets
        :param in_if: Inside interface
        :param icmp_type: Type of error ICMP packet
            we are expecting (Default 11)
        """
        for packet in capture:
            try:
                self.assertEqual(packet[IP].dst, in_if.remote_ip4)
                # Each packet must carry an ICMP layer of the given type.
                self.assertEqual(packet.haslayer(ICMP), 1)
                icmp = packet[ICMP]
                self.assertEqual(icmp.type, icmp_type)
                # The error must embed the original (inner) IP packet.
                self.assertTrue(icmp.haslayer(IPerror))
                inner_ip = icmp[IPerror]
                if inner_ip.haslayer(TCPerror):
                    # Inner source port must be translated back to the
                    # original inside port.
                    self.assertEqual(inner_ip[TCPerror].sport,
                                     self.tcp_port_in)
                elif inner_ip.haslayer(UDPerror):
                    self.assertEqual(inner_ip[UDPerror].sport,
                                     self.udp_port_in)
                else:
                    self.assertEqual(inner_ip[ICMPerror].id, self.icmp_id_in)
            # NOTE(review): bare except (logs then re-raises).
            except:
                self.logger.error(ppp("Unexpected or invalid packet "
                                      "(inside network):", packet))
                raise
    def create_stream_frag(self, src_if, dst, sport, dport, data,
                           proto=IP_PROTOS.tcp, echo_reply=False):
        """
        Create fragmented packet stream

        :param src_if: Source interface
        :param dst: Destination IPv4 address
        :param sport: Source port
        :param dport: Destination port
        :param data: Payload data
        :param proto: protocol (TCP, UDP, ICMP)
        :param echo_reply: use echo_reply if protocol is ICMP
        :returns: Fragments
        """
        if proto == IP_PROTOS.tcp:
            # Build the whole packet once so scapy computes the TCP
            # checksum over the complete payload; that checksum is then
            # reused in the first fragment's TCP header.
            p = (IP(src=src_if.remote_ip4, dst=dst) /
                 TCP(sport=sport, dport=dport) /
                 Raw(data))
            p = p.__class__(scapy.compat.raw(p))
            chksum = p[TCP].chksum
            proto_header = TCP(sport=sport, dport=dport, chksum=chksum)
        elif proto == IP_PROTOS.udp:
            proto_header = UDP(sport=sport, dport=dport)
        elif proto == IP_PROTOS.icmp:
            if not echo_reply:
                proto_header = ICMP(id=sport, type='echo-request')
            else:
                proto_header = ICMP(id=sport, type='echo-reply')
        else:
            raise Exception("Unsupported protocol")
        # A common IP identification value ties the fragments together.
        id = random.randint(0, 65535)
        pkts = []
        # First fragment: L4 header plus the first payload chunk (4 bytes
        # for TCP, 16 otherwise) — 24 bytes of L3 payload either way.
        if proto == IP_PROTOS.tcp:
            raw = Raw(data[0:4])
        else:
            raw = Raw(data[0:16])
        p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
             IP(src=src_if.remote_ip4, dst=dst, flags="MF", frag=0, id=id) /
             proto_header /
             raw)
        pkts.append(p)
        # Second fragment at offset 3 (offsets are in 8-byte units, i.e.
        # byte offset 24, immediately after the first fragment).
        if proto == IP_PROTOS.tcp:
            raw = Raw(data[4:20])
        else:
            raw = Raw(data[16:32])
        p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
             IP(src=src_if.remote_ip4, dst=dst, flags="MF", frag=3, id=id,
                proto=proto) /
             raw)
        pkts.append(p)
        # Last fragment (no MF flag) at byte offset 40.
        if proto == IP_PROTOS.tcp:
            raw = Raw(data[20:])
        else:
            raw = Raw(data[32:])
        p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
             IP(src=src_if.remote_ip4, dst=dst, frag=5, proto=proto,
                id=id) /
             raw)
        pkts.append(p)
        return pkts
    def reass_frags_and_verify(self, frags, src, dst):
        """
        Reassemble and verify fragmented packet

        :param frags: Captured fragments
        :param src: Source IPv4 address to verify
        :param dst: Destination IPv4 address to verify
        :returns: Reassembled IPv4 packet
        """
        buffer = BytesIO()
        for p in frags:
            self.assertEqual(p[IP].src, src)
            self.assertEqual(p[IP].dst, dst)
            self.assert_ip_checksum_valid(p)
            # Fragment offsets are in 8-byte units; write each fragment's
            # payload at its byte offset in the reassembly buffer.
            buffer.seek(p[IP].frag * 8)
            buffer.write(bytes(p[IP].payload))
        ip = IP(src=frags[0][IP].src, dst=frags[0][IP].dst,
                proto=frags[0][IP].proto)
        if ip.proto == IP_PROTOS.tcp:
            p = (ip / TCP(buffer.getvalue()))
            self.logger.debug(ppp("Reassembled:", p))
            self.assert_tcp_checksum_valid(p)
        elif ip.proto == IP_PROTOS.udp:
            p = (ip / UDP(buffer.getvalue()[:8]) /
                 Raw(buffer.getvalue()[8:]))
        elif ip.proto == IP_PROTOS.icmp:
            p = (ip / ICMP(buffer.getvalue()))
        # NOTE(review): for any other protocol, ``p`` still holds the last
        # fragment from the loop above, not a reassembled packet.
        return p
    def verify_ipfix_nat44_ses(self, data):
        """
        Verify IPFIX NAT44EI session create/delete event

        Expects exactly six records: three session-create (natEvent 4) and
        three session-delete (natEvent 5) events.

        :param data: Decoded IPFIX data records
        """
        nat44_ses_create_num = 0
        nat44_ses_delete_num = 0
        self.assertEqual(6, len(data))
        for record in data:
            # natEvent: 4 = session create, 5 = session delete
            self.assertIn(scapy.compat.orb(record[230]), [4, 5])
            if scapy.compat.orb(record[230]) == 4:
                nat44_ses_create_num += 1
            else:
                nat44_ses_delete_num += 1
            # sourceIPv4Address
            self.assertEqual(self.pg0.remote_ip4,
                             str(ipaddress.IPv4Address(record[8])))
            # postNATSourceIPv4Address
            self.assertEqual(socket.inet_pton(socket.AF_INET, self.nat_addr),
                             record[225])
            # ingressVRFID
            self.assertEqual(struct.pack("!I", 0), record[234])
            # protocolIdentifier/sourceTransportPort
            # /postNAPTSourceTransportPort
            if IP_PROTOS.icmp == scapy.compat.orb(record[4]):
                self.assertEqual(struct.pack("!H", self.icmp_id_in), record[7])
                self.assertEqual(struct.pack("!H", self.icmp_id_out),
                                 record[227])
            elif IP_PROTOS.tcp == scapy.compat.orb(record[4]):
                self.assertEqual(struct.pack("!H", self.tcp_port_in),
                                 record[7])
                self.assertEqual(struct.pack("!H", self.tcp_port_out),
                                 record[227])
            elif IP_PROTOS.udp == scapy.compat.orb(record[4]):
                self.assertEqual(struct.pack("!H", self.udp_port_in),
                                 record[7])
                self.assertEqual(struct.pack("!H", self.udp_port_out),
                                 record[227])
            else:
                self.fail("Invalid protocol")
        self.assertEqual(3, nat44_ses_create_num)
        self.assertEqual(3, nat44_ses_delete_num)
def verify_ipfix_addr_exhausted(self, data):
self.assertEqual(1, len(data))
record = data[0]
# natEvent
self.assertEqual(scapy.compat.orb(record[230]), 3)
# natPoolID
self.assertEqual(struct.pack("!I", 0), record[283])
def verify_ipfix_max_sessions(self, data, limit):
self.assertEqual(1, len(data))
record = data[0]
# natEvent
self.assertEqual(scapy.compat.orb(record[230]), 13)
# natQuotaExceededEvent
self.assertEqual(struct.pack("I", 1), record[466])
# maxSessionEntries
self.assertEqual(struct.pack("I", limit), record[471])
def verify_no_nat44_user(self):
""" Verify that there is no NAT44EI user """
users = self.vapi.nat44_user_dump()
self.assertEqual(len(users), 0)
users = self.statistics.get_counter('/nat44/total-users')
self.assertEqual(users[0][0], 0)
sessions = self.statistics.get_counter('/nat44/total-sessions')
self.assertEqual(sessions[0][0], 0)
def verify_syslog_apmap(self, data, is_add=True):
message = data.decode('utf-8')
try:
message = SyslogMessage.parse(message)
except ParseError as e:
self.logger.error(e)
raise
else:
self.assertEqual(message.severity, SyslogSeverity.info)
self.assertEqual(message.appname, 'NAT')
self.assertEqual(message.msgid, 'APMADD' if is_add else 'APMDEL')
sd_params = message.sd.get('napmap')
self.assertTrue(sd_params is not None)
self.assertEqual(sd_params.get('IATYP'), 'IPv4')
self.assertEqual(sd_params.get('ISADDR'), self.pg0.remote_ip4)
self.assertEqual(sd_params.get('ISPORT'), "%d" % self.tcp_port_in)
self.assertEqual(sd_params.get('XATYP'), 'IPv4')
self.assertEqual(sd_params.get('XSADDR'), self.nat_addr)
self.assertEqual(sd_params.get('XSPORT'), "%d" % self.tcp_port_out)
self.assertEqual(sd_params.get('PROTO'), "%d" % IP_PROTOS.tcp)
self.assertTrue(sd_params.get('SSUBIX') is not None)
self.assertEqual(sd_params.get('SVLAN'), '0')
def verify_mss_value(self, pkt, mss):
if not pkt.haslayer(IP) or not pkt.haslayer(TCP):
raise TypeError("Not a TCP/IP packet")
for option in pkt[TCP].options:
if option[0] == 'MSS':
self.assertEqual(option[1], mss)
self.assert_tcp_checksum_valid(pkt)
@staticmethod
def proto2layer(proto):
if proto == IP_PROTOS.tcp:
return TCP
elif proto == IP_PROTOS.udp:
return UDP
elif proto == IP_PROTOS.icmp:
return ICMP
else:
raise Exception("Unsupported protocol")
    def frag_in_order(self, proto=IP_PROTOS.tcp, dont_translate=False,
                      ignore_port=False):
        """Send in-order fragmented traffic in both directions and verify
        the reassembled payload and translation (or its absence).

        :param proto: IP protocol of the payload (TCP, UDP or ICMP)
        :param dont_translate: if True expect packets to pass untranslated
        :param ignore_port: if True skip source port / ICMP id checks
        """
        layer = self.proto2layer(proto)
        # TCP payload is shorter so the total fragment payload sizes match
        if proto == IP_PROTOS.tcp:
            data = b"A" * 4 + b"B" * 16 + b"C" * 3
        else:
            data = b"A" * 16 + b"B" * 16 + b"C" * 3
        # random ephemeral source port / ICMP id for this run
        self.port_in = random.randint(1025, 65535)
        # in2out
        pkts = self.create_stream_frag(self.pg0, self.pg1.remote_ip4,
                                       self.port_in, 20, data, proto)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        frags = self.pg1.get_capture(len(pkts))
        # expected source address differs depending on whether NAT applies
        if not dont_translate:
            p = self.reass_frags_and_verify(frags,
                                            self.nat_addr,
                                            self.pg1.remote_ip4)
        else:
            p = self.reass_frags_and_verify(frags,
                                            self.pg0.remote_ip4,
                                            self.pg1.remote_ip4)
        if proto != IP_PROTOS.icmp:
            if not dont_translate:
                self.assertEqual(p[layer].dport, 20)
                if not ignore_port:
                    self.assertNotEqual(p[layer].sport, self.port_in)
            else:
                self.assertEqual(p[layer].sport, self.port_in)
        else:
            # for ICMP the id field plays the role of the port
            if not ignore_port:
                if not dont_translate:
                    self.assertNotEqual(p[layer].id, self.port_in)
                else:
                    self.assertEqual(p[layer].id, self.port_in)
        self.assertEqual(data, p[Raw].load)
        # out2in
        if not dont_translate:
            dst_addr = self.nat_addr
        else:
            dst_addr = self.pg0.remote_ip4
        # reply goes back to whatever source port/id the in2out leg produced
        if proto != IP_PROTOS.icmp:
            sport = 20
            dport = p[layer].sport
        else:
            sport = p[layer].id
            dport = 0
        pkts = self.create_stream_frag(self.pg1, dst_addr, sport, dport, data,
                                       proto, echo_reply=True)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        frags = self.pg0.get_capture(len(pkts))
        p = self.reass_frags_and_verify(frags,
                                        self.pg1.remote_ip4,
                                        self.pg0.remote_ip4)
        if proto != IP_PROTOS.icmp:
            self.assertEqual(p[layer].sport, 20)
            self.assertEqual(p[layer].dport, self.port_in)
        else:
            self.assertEqual(p[layer].id, self.port_in)
        self.assertEqual(data, p[Raw].load)
    def reass_hairpinning(self, server_addr, server_in_port, server_out_port,
                          host_in_port, proto=IP_PROTOS.tcp,
                          ignore_port=False):
        """Send fragmented hairpinned traffic from host to server and verify
        the reassembled, translated packet.

        :param server_addr: inside address of the server (expected dst
            after reassembly)
        :param server_in_port: server's inside port (expected dport)
        :param server_out_port: server's external (mapped) port the host
            sends to
        :param host_in_port: host's source port / ICMP id
        :param proto: IP protocol of the payload (TCP, UDP or ICMP)
        :param ignore_port: if True skip translated source port / id check
        """
        layer = self.proto2layer(proto)
        # TCP payload is shorter so the total fragment payload sizes match
        if proto == IP_PROTOS.tcp:
            data = b"A" * 4 + b"B" * 16 + b"C" * 3
        else:
            data = b"A" * 16 + b"B" * 16 + b"C" * 3
        # send packet from host to server
        pkts = self.create_stream_frag(self.pg0,
                                       self.nat_addr,
                                       host_in_port,
                                       server_out_port,
                                       data,
                                       proto)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # hairpinned traffic comes back in on the same (inside) interface
        frags = self.pg0.get_capture(len(pkts))
        p = self.reass_frags_and_verify(frags,
                                        self.nat_addr,
                                        server_addr)
        if proto != IP_PROTOS.icmp:
            if not ignore_port:
                self.assertNotEqual(p[layer].sport, host_in_port)
            self.assertEqual(p[layer].dport, server_in_port)
        else:
            if not ignore_port:
                self.assertNotEqual(p[layer].id, host_in_port)
        self.assertEqual(data, p[Raw].load)
    def frag_out_of_order(self, proto=IP_PROTOS.tcp, dont_translate=False,
                          ignore_port=False):
        """Send reversed-order fragments in both directions (twice) and
        verify the reassembled payload and translation (or its absence).

        Same checks as frag_in_order, but the fragment lists are reversed
        before sending and the whole exchange is repeated to also exercise
        the established-session path.

        :param proto: IP protocol of the payload (TCP, UDP or ICMP)
        :param dont_translate: if True expect packets to pass untranslated
        :param ignore_port: if True skip source port / ICMP id checks
        """
        layer = self.proto2layer(proto)
        # TCP payload is shorter so the total fragment payload sizes match
        if proto == IP_PROTOS.tcp:
            data = b"A" * 4 + b"B" * 16 + b"C" * 3
        else:
            data = b"A" * 16 + b"B" * 16 + b"C" * 3
        self.port_in = random.randint(1025, 65535)
        # second iteration hits the session created by the first one
        for i in range(2):
            # in2out
            pkts = self.create_stream_frag(self.pg0, self.pg1.remote_ip4,
                                           self.port_in, 20, data, proto)
            # deliver fragments out of order
            pkts.reverse()
            self.pg0.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            frags = self.pg1.get_capture(len(pkts))
            if not dont_translate:
                p = self.reass_frags_and_verify(frags,
                                                self.nat_addr,
                                                self.pg1.remote_ip4)
            else:
                p = self.reass_frags_and_verify(frags,
                                                self.pg0.remote_ip4,
                                                self.pg1.remote_ip4)
            if proto != IP_PROTOS.icmp:
                if not dont_translate:
                    self.assertEqual(p[layer].dport, 20)
                    if not ignore_port:
                        self.assertNotEqual(p[layer].sport, self.port_in)
                else:
                    self.assertEqual(p[layer].sport, self.port_in)
            else:
                # for ICMP the id field plays the role of the port
                if not ignore_port:
                    if not dont_translate:
                        self.assertNotEqual(p[layer].id, self.port_in)
                    else:
                        self.assertEqual(p[layer].id, self.port_in)
            self.assertEqual(data, p[Raw].load)
            # out2in
            if not dont_translate:
                dst_addr = self.nat_addr
            else:
                dst_addr = self.pg0.remote_ip4
            if proto != IP_PROTOS.icmp:
                sport = 20
                dport = p[layer].sport
            else:
                sport = p[layer].id
                dport = 0
            pkts = self.create_stream_frag(self.pg1, dst_addr, sport, dport,
                                           data, proto, echo_reply=True)
            # deliver fragments out of order
            pkts.reverse()
            self.pg1.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            frags = self.pg0.get_capture(len(pkts))
            p = self.reass_frags_and_verify(frags,
                                            self.pg1.remote_ip4,
                                            self.pg0.remote_ip4)
            if proto != IP_PROTOS.icmp:
                self.assertEqual(p[layer].sport, 20)
                self.assertEqual(p[layer].dport, self.port_in)
            else:
                self.assertEqual(p[layer].id, self.port_in)
            self.assertEqual(data, p[Raw].load)
class TestNAT44EI(MethodHolder):
    """ NAT44EI Test Cases """
    # session/user limits passed to nat44_plugin_enable_disable in setUp()
    max_translations = 10240
    max_users = 10240
    @classmethod
    def setUpClass(cls):
        """Create pg interfaces and the per-VRF topology shared by all
        NAT44EI tests."""
        super(TestNAT44EI, cls).setUpClass()
        cls.vapi.cli("set log class nat level debug")
        # well-known inside/outside ports and ICMP ids used by the stream
        # helpers; in == out so identity checks work by default
        cls.tcp_port_in = 6303
        cls.tcp_port_out = 6303
        cls.udp_port_in = 6304
        cls.udp_port_out = 6304
        cls.icmp_id_in = 6305
        cls.icmp_id_out = 6305
        cls.nat_addr = '10.0.0.3'
        cls.ipfix_src_port = 4739
        cls.ipfix_domain_id = 1
        cls.tcp_external_port = 80
        cls.udp_external_port = 69
        cls.create_pg_interfaces(range(10))
        # pg0-pg3: plain interfaces in the default table
        cls.interfaces = list(cls.pg_interfaces[0:4])
        for i in cls.interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()
        cls.pg0.generate_remote_hosts(3)
        cls.pg0.configure_ipv4_neighbors()
        cls.pg1.generate_remote_hosts(1)
        cls.pg1.configure_ipv4_neighbors()
        # pg4-pg6: interfaces with overlapping address space in VRFs 10/20
        cls.overlapping_interfaces = list(list(cls.pg_interfaces[4:7]))
        cls.vapi.ip_table_add_del(is_add=1, table={'table_id': 10})
        cls.vapi.ip_table_add_del(is_add=1, table={'table_id': 20})
        cls.pg4._local_ip4 = "172.16.255.1"
        cls.pg4._remote_hosts[0]._ip4 = "172.16.255.2"
        cls.pg4.set_table_ip4(10)
        cls.pg5._local_ip4 = "172.17.255.3"
        cls.pg5._remote_hosts[0]._ip4 = "172.17.255.4"
        cls.pg5.set_table_ip4(10)
        # pg6 intentionally reuses pg4's subnet, but in VRF 20
        cls.pg6._local_ip4 = "172.16.255.1"
        cls.pg6._remote_hosts[0]._ip4 = "172.16.255.2"
        cls.pg6.set_table_ip4(20)
        for i in cls.overlapping_interfaces:
            i.config_ip4()
            i.admin_up()
            i.resolve_arp()
        # pg7/pg8: unnumbered/up-only interfaces; pg9: multi-host interface
        cls.pg7.admin_up()
        cls.pg8.admin_up()
        cls.pg9.generate_remote_hosts(2)
        cls.pg9.config_ip4()
        cls.vapi.sw_interface_add_del_address(
            sw_if_index=cls.pg9.sw_if_index,
            prefix="10.0.0.1/24")
        cls.pg9.admin_up()
        cls.pg9.resolve_arp()
        # make both pg9 remote hosts share one address and alias it to pg4
        cls.pg9._remote_hosts[1]._ip4 = cls.pg9._remote_hosts[0]._ip4
        cls.pg4._remote_ip4 = cls.pg9._remote_hosts[0]._ip4 = "10.0.0.2"
        cls.pg9.resolve_arp()
    def setUp(self):
        """Enable the NAT44EI plugin with the class session/user limits."""
        super(TestNAT44EI, self).setUp()
        self.vapi.nat44_plugin_enable_disable(
            sessions=self.max_translations,
            users=self.max_users, enable=1)
    def tearDown(self):
        """Disable IPFIX and the NAT44EI plugin after each test."""
        super(TestNAT44EI, self).tearDown()
        if not self.vpp_dead:
            self.vapi.nat_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
                                               src_port=self.ipfix_src_port,
                                               enable=0)
            # restore defaults in case a test changed them
            self.ipfix_src_port = 4739
            self.ipfix_domain_id = 1
            self.vapi.nat44_plugin_enable_disable(enable=0)
            self.vapi.cli("clear logging")
    def test_clear_sessions(self):
        """ NAT44EI session clearing test """
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # sanity: this test class runs the non-endpoint-dependent NAT
        nat_config = self.vapi.nat_show_config()
        self.assertEqual(0, nat_config.endpoint_dependent)
        # create some sessions by sending in2out traffic
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out(capture)
        sessions = self.statistics.get_counter('/nat44/total-sessions')
        self.assertTrue(sessions[0][0] > 0)
        self.logger.info("sessions before clearing: %s" % sessions[0][0])
        # CLI clear must drop the session count to zero
        self.vapi.cli("clear nat44 sessions")
        sessions = self.statistics.get_counter('/nat44/total-sessions')
        self.assertEqual(sessions[0][0], 0)
        self.logger.info("sessions after clearing: %s" % sessions[0][0])
    def test_dynamic(self):
        """ NAT44EI dynamic translation test """
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # in2out
        # snapshot per-interface slowpath counters before sending traffic
        tcpn = self.statistics.get_counter('/nat44/in2out/slowpath/tcp')[0]
        udpn = self.statistics.get_counter('/nat44/in2out/slowpath/udp')[0]
        icmpn = self.statistics.get_counter('/nat44/in2out/slowpath/icmp')[0]
        drops = self.statistics.get_counter('/nat44/in2out/slowpath/drops')[0]
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out(capture)
        # counter deltas: stream contains 2 TCP, 1 UDP, 1 ICMP, 0 drops
        if_idx = self.pg0.sw_if_index
        cnt = self.statistics.get_counter('/nat44/in2out/slowpath/tcp')[0]
        self.assertEqual(cnt[if_idx] - tcpn[if_idx], 2)
        cnt = self.statistics.get_counter('/nat44/in2out/slowpath/udp')[0]
        self.assertEqual(cnt[if_idx] - udpn[if_idx], 1)
        cnt = self.statistics.get_counter('/nat44/in2out/slowpath/icmp')[0]
        self.assertEqual(cnt[if_idx] - icmpn[if_idx], 1)
        cnt = self.statistics.get_counter('/nat44/in2out/slowpath/drops')[0]
        self.assertEqual(cnt[if_idx] - drops[if_idx], 0)
        # out2in
        tcpn = self.statistics.get_counter('/nat44/out2in/slowpath/tcp')[0]
        udpn = self.statistics.get_counter('/nat44/out2in/slowpath/udp')[0]
        icmpn = self.statistics.get_counter('/nat44/out2in/slowpath/icmp')[0]
        drops = self.statistics.get_counter('/nat44/out2in/slowpath/drops')[0]
        pkts = self.create_stream_out(self.pg1)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg0.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg0)
        if_idx = self.pg1.sw_if_index
        cnt = self.statistics.get_counter('/nat44/out2in/slowpath/tcp')[0]
        self.assertEqual(cnt[if_idx] - tcpn[if_idx], 2)
        cnt = self.statistics.get_counter('/nat44/out2in/slowpath/udp')[0]
        self.assertEqual(cnt[if_idx] - udpn[if_idx], 1)
        cnt = self.statistics.get_counter('/nat44/out2in/slowpath/icmp')[0]
        self.assertEqual(cnt[if_idx] - icmpn[if_idx], 1)
        cnt = self.statistics.get_counter('/nat44/out2in/slowpath/drops')[0]
        self.assertEqual(cnt[if_idx] - drops[if_idx], 0)
        # one user with one TCP, one UDP and one ICMP session
        users = self.statistics.get_counter('/nat44/total-users')
        self.assertEqual(users[0][0], 1)
        sessions = self.statistics.get_counter('/nat44/total-sessions')
        self.assertEqual(sessions[0][0], 3)
    def test_dynamic_icmp_errors_in2out_ttl_1(self):
        """ NAT44EI handling of client packets with TTL=1 """
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # Client side - generate traffic (TTL expires at VPP)
        pkts = self.create_stream_in(self.pg0, self.pg1, ttl=1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Client side - verify ICMP type 11 (time exceeded) packets
        capture = self.pg0.get_capture(len(pkts))
        self.verify_capture_in_with_icmp_errors(capture, self.pg0)
    def test_dynamic_icmp_errors_out2in_ttl_1(self):
        """ NAT44EI handling of server packets with TTL=1 """
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # Client side - create sessions so out2in traffic has a match
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Server side - generate traffic (TTL expires at VPP)
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out(capture)
        pkts = self.create_stream_out(self.pg1, ttl=1)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Server side - verify ICMP type 11 (time exceeded) packets
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out_with_icmp_errors(capture,
                                                 src_ip=self.pg1.local_ip4)
    def test_dynamic_icmp_errors_in2out_ttl_2(self):
        """ NAT44EI handling of error responses to client packets with TTL=2
        """
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # Client side - generate traffic (TTL=2 survives VPP, expires at the
        # next hop)
        pkts = self.create_stream_in(self.pg0, self.pg1, ttl=2)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Server side - simulate ICMP type 11 response embedding the
        # translated inner packet
        capture = self.pg1.get_capture(len(pkts))
        pkts = [Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
                IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
                ICMP(type=11) / packet[IP] for packet in capture]
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Client side - verify ICMP type 11 packets (inner packet must be
        # translated back)
        capture = self.pg0.get_capture(len(pkts))
        self.verify_capture_in_with_icmp_errors(capture, self.pg0)
    def test_dynamic_icmp_errors_out2in_ttl_2(self):
        """ NAT44EI handling of error responses to server packets with TTL=2
        """
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # Client side - create sessions so out2in traffic has a match
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Server side - generate traffic (TTL=2 survives VPP, expires at the
        # next hop)
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out(capture)
        pkts = self.create_stream_out(self.pg1, ttl=2)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Client side - simulate ICMP type 11 response embedding the inner
        # packet received by the client
        capture = self.pg0.get_capture(len(pkts))
        pkts = [Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
                IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
                ICMP(type=11) / packet[IP] for packet in capture]
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # Server side - verify ICMP type 11 packets
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out_with_icmp_errors(capture)
def test_ping_out_interface_from_outside(self):
""" NAT44EI ping out interface from outside network """
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IP(src=self.pg1.remote_ip4, dst=self.pg1.local_ip4) /
ICMP(id=self.icmp_id_out, type='echo-request'))
pkts = [p]
self.pg1.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(len(pkts))
packet = capture[0]
try:
self.assertEqual(packet[IP].src, self.pg1.local_ip4)
self.assertEqual(packet[IP].dst, self.pg1.remote_ip4)
self.assertEqual(packet[ICMP].id, self.icmp_id_in)
self.assertEqual(packet[ICMP].type, 0) # echo reply
except:
self.logger.error(ppp("Unexpected or invalid packet "
"(outside network):", packet))
raise
    def test_ping_internal_host_from_outside(self):
        """ NAT44EI ping internal host from outside network """
        self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # out2in: echo request to the static mapping address
        pkt = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
               IP(src=self.pg1.remote_ip4, dst=self.nat_addr, ttl=64) /
               ICMP(id=self.icmp_id_out, type='echo-request'))
        self.pg1.add_stream(pkt)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg0.get_capture(1)
        self.verify_capture_in(capture, self.pg0)
        self.assert_equal(capture[0][IP].proto, IP_PROTOS.icmp)
        # in2out: echo reply from the internal host back to the pinger
        pkt = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
               IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4, ttl=64) /
               ICMP(id=self.icmp_id_in, type='echo-reply'))
        self.pg0.add_stream(pkt)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(1)
        # address-only static mapping keeps the original id (same_port)
        self.verify_capture_out(capture, same_port=True)
        self.assert_equal(capture[0][IP].proto, IP_PROTOS.icmp)
    def test_forwarding(self):
        """ NAT44EI forwarding test """
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        self.vapi.nat44_forwarding_enable_disable(enable=1)
        real_ip = self.pg0.remote_ip4
        alias_ip = self.nat_addr
        flags = self.config_flags.NAT_IS_ADDR_ONLY
        self.vapi.nat44_add_del_static_mapping(is_add=1,
                                               local_ip_address=real_ip,
                                               external_ip_address=alias_ip,
                                               external_sw_if_index=0xFFFFFFFF,
                                               flags=flags)
        try:
            # static mapping match: traffic to/from alias_ip is translated
            pkts = self.create_stream_out(self.pg1)
            self.pg1.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            capture = self.pg0.get_capture(len(pkts))
            self.verify_capture_in(capture, self.pg0)
            pkts = self.create_stream_in(self.pg0, self.pg1)
            self.pg0.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            capture = self.pg1.get_capture(len(pkts))
            # address-only mapping: ports unchanged
            self.verify_capture_out(capture, same_port=True)
            # no static mapping match: forwarding passes traffic unchanged
            # (temporarily swap in a host without a mapping)
            host0 = self.pg0.remote_hosts[0]
            self.pg0.remote_hosts[0] = self.pg0.remote_hosts[1]
            try:
                pkts = self.create_stream_out(self.pg1,
                                              dst_ip=self.pg0.remote_ip4,
                                              use_inside_ports=True)
                self.pg1.add_stream(pkts)
                self.pg_enable_capture(self.pg_interfaces)
                self.pg_start()
                capture = self.pg0.get_capture(len(pkts))
                self.verify_capture_in(capture, self.pg0)
                pkts = self.create_stream_in(self.pg0, self.pg1)
                self.pg0.add_stream(pkts)
                self.pg_enable_capture(self.pg_interfaces)
                self.pg_start()
                capture = self.pg1.get_capture(len(pkts))
                self.verify_capture_out(capture, nat_ip=self.pg0.remote_ip4,
                                        same_port=True)
            finally:
                # restore the original remote host
                self.pg0.remote_hosts[0] = host0
        finally:
            # undo forwarding and the static mapping even on failure
            self.vapi.nat44_forwarding_enable_disable(enable=0)
            flags = self.config_flags.NAT_IS_ADDR_ONLY
            self.vapi.nat44_add_del_static_mapping(
                is_add=0,
                local_ip_address=real_ip,
                external_ip_address=alias_ip,
                external_sw_if_index=0xFFFFFFFF,
                flags=flags)
def test_static_in(self):
""" NAT44EI 1:1 NAT initialized from inside network """
nat_ip = "10.0.0.10"
self.tcp_port_out = 6303
self.udp_port_out = 6304
self.icmp_id_out = 6305
self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
sm = self.vapi.nat44_static_mapping_dump()
self.assertEqual(len(sm), 1)
self.assertEqual(sm[0].tag, '')
self.assertEqual(sm[0].protocol, 0)
self.assertEqual(sm[0].local_port, 0)
self.assertEqual(sm[0].external_port, 0)
# in2out
pkts = self.create_stream_in(self.pg0, self.pg1)
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(len(pkts))
self.verify_capture_out(capture, nat_ip, True)
# out2in
pkts = self.create_stream_out(self.pg1, nat_ip)
self.pg1.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(len(pkts))
self.verify_capture_in(capture, self.pg0)
def test_static_out(self):
""" NAT44EI 1:1 NAT initialized from outside network """
nat_ip = "10.0.0.20"
self.tcp_port_out = 6303
self.udp_port_out = 6304
self.icmp_id_out = 6305
tag = "testTAG"
self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip, tag=tag)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
sm = self.vapi.nat44_static_mapping_dump()
self.assertEqual(len(sm), 1)
self.assertEqual(sm[0].tag, tag)
# out2in
pkts = self.create_stream_out(self.pg1, nat_ip)
self.pg1.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(len(pkts))
self.verify_capture_in(capture, self.pg0)
# in2out
pkts = self.create_stream_in(self.pg0, self.pg1)
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(len(pkts))
self.verify_capture_out(capture, nat_ip, True)
    def test_static_with_port_in(self):
        """ NAT44EI 1:1 NAPT initialized from inside network """
        # distinct external ports exercise the port-rewrite path
        self.tcp_port_out = 3606
        self.udp_port_out = 3607
        self.icmp_id_out = 3608
        self.nat44_add_address(self.nat_addr)
        # one static per-protocol mapping for TCP, UDP and ICMP
        self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
                                      self.tcp_port_in, self.tcp_port_out,
                                      proto=IP_PROTOS.tcp)
        self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
                                      self.udp_port_in, self.udp_port_out,
                                      proto=IP_PROTOS.udp)
        self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
                                      self.icmp_id_in, self.icmp_id_out,
                                      proto=IP_PROTOS.icmp)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # in2out
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out(capture)
        # out2in
        pkts = self.create_stream_out(self.pg1)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg0.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg0)
    def test_static_with_port_out(self):
        """ NAT44EI 1:1 NAPT initialized from outside network """
        # distinct external ports exercise the port-rewrite path
        self.tcp_port_out = 30606
        self.udp_port_out = 30607
        self.icmp_id_out = 30608
        self.nat44_add_address(self.nat_addr)
        # one static per-protocol mapping for TCP, UDP and ICMP
        self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
                                      self.tcp_port_in, self.tcp_port_out,
                                      proto=IP_PROTOS.tcp)
        self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
                                      self.udp_port_in, self.udp_port_out,
                                      proto=IP_PROTOS.udp)
        self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
                                      self.icmp_id_in, self.icmp_id_out,
                                      proto=IP_PROTOS.icmp)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # out2in first: the session is initiated from the outside network
        pkts = self.create_stream_out(self.pg1)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg0.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg0)
        # in2out
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out(capture)
    def test_static_vrf_aware(self):
        """ NAT44EI 1:1 NAT VRF awareness """
        nat_ip1 = "10.0.0.30"
        nat_ip2 = "10.0.0.40"
        self.tcp_port_out = 6303
        self.udp_port_out = 6304
        self.icmp_id_out = 6305
        # both mappings live in VRF 10; pg4 is in VRF 10, pg0 is not
        self.nat44_add_static_mapping(self.pg4.remote_ip4, nat_ip1,
                                      vrf_id=10)
        self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip2,
                                      vrf_id=10)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg3.sw_if_index,
            is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg4.sw_if_index,
            flags=flags, is_add=1)
        # inside interface VRF match NAT44EI static mapping VRF
        pkts = self.create_stream_in(self.pg4, self.pg3)
        self.pg4.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg3.get_capture(len(pkts))
        self.verify_capture_out(capture, nat_ip1, True)
        # inside interface VRF don't match NAT44EI static mapping VRF (packets
        # are dropped)
        pkts = self.create_stream_in(self.pg0, self.pg3)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg3.assert_nothing_captured()
    def test_dynamic_to_static(self):
        """ NAT44EI Switch from dynamic translation to 1:1NAT """
        nat_ip = "10.0.0.10"
        self.tcp_port_out = 6303
        self.udp_port_out = 6304
        self.icmp_id_out = 6305
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        # dynamic: sessions created against the NAT pool address
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out(capture)
        # 1:1NAT: adding the static mapping must purge the dynamic sessions
        self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip)
        sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
        self.assertEqual(len(sessions), 0)
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        # traffic now uses the static address with original ports
        self.verify_capture_out(capture, nat_ip, True)
def test_identity_nat(self):
""" NAT44EI Identity NAT """
flags = self.config_flags.NAT_IS_ADDR_ONLY
self.vapi.nat44_add_del_identity_mapping(
ip_address=self.pg0.remote_ip4, sw_if_index=0xFFFFFFFF,
flags=flags, is_add=1)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
IP(src=self.pg1.remote_ip4, dst=self.pg0.remote_ip4) /
TCP(sport=12345, dport=56789))
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(1)
p = capture[0]
try:
ip = p[IP]
tcp = p[TCP]
self.assertEqual(ip.dst, self.pg0.remote_ip4)
self.assertEqual(ip.src, self.pg1.remote_ip4)
self.assertEqual(tcp.dport, 56789)
self.assertEqual(tcp.sport, 12345)
self.assert_packet_checksums_valid(p)
except:
self.logger.error(ppp("Unexpected or invalid packet:", p))
raise
sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
self.assertEqual(len(sessions), 0)
flags = self.config_flags.NAT_IS_ADDR_ONLY
self.vapi.nat44_add_del_identity_mapping(
ip_address=self.pg0.remote_ip4, sw_if_index=0xFFFFFFFF,
flags=flags, vrf_id=1, is_add=1)
identity_mappings = self.vapi.nat44_identity_mapping_dump()
self.assertEqual(len(identity_mappings), 2)
    def test_multiple_inside_interfaces(self):
        """ NAT44EI multiple non-overlapping address space inside interfaces
        """
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        # pg0 and pg1 inside, pg3 outside
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg3.sw_if_index,
            is_add=1)
        # between two NAT44EI inside interfaces (no translation)
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_no_translation(capture, self.pg0, self.pg1)
        # from inside to interface without translation
        pkts = self.create_stream_in(self.pg0, self.pg2)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg2.get_capture(len(pkts))
        self.verify_capture_no_translation(capture, self.pg0, self.pg2)
        # in2out 1st interface
        pkts = self.create_stream_in(self.pg0, self.pg3)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg3.get_capture(len(pkts))
        self.verify_capture_out(capture)
        # out2in 1st interface
        pkts = self.create_stream_out(self.pg3)
        self.pg3.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg0.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg0)
        # in2out 2nd interface
        pkts = self.create_stream_in(self.pg1, self.pg3)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg3.get_capture(len(pkts))
        self.verify_capture_out(capture)
        # out2in 2nd interface
        pkts = self.create_stream_out(self.pg3)
        self.pg3.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg1)
    def test_inside_overlapping_interfaces(self):
        """ NAT44EI multiple inside interfaces with overlapping address space
        """
        static_nat_ip = "10.0.0.10"
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        # pg3 outside; pg4/pg5 inside in VRF 10, pg6 inside in VRF 20
        # (pg6 reuses pg4's subnet -- see setUpClass)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg3.sw_if_index,
            is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg4.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg5.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg6.sw_if_index,
            flags=flags, is_add=1)
        self.nat44_add_static_mapping(self.pg6.remote_ip4, static_nat_ip,
                                      vrf_id=20)
        # between NAT44EI inside interfaces with same VRF (no translation)
        pkts = self.create_stream_in(self.pg4, self.pg5)
        self.pg4.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg5.get_capture(len(pkts))
        self.verify_capture_no_translation(capture, self.pg4, self.pg5)
        # between NAT44EI inside interfaces with different VRF (hairpinning)
        p = (Ether(src=self.pg4.remote_mac, dst=self.pg4.local_mac) /
             IP(src=self.pg4.remote_ip4, dst=static_nat_ip) /
             TCP(sport=1234, dport=5678))
        self.pg4.add_stream(p)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg6.get_capture(1)
        p = capture[0]
        try:
            ip = p[IP]
            tcp = p[TCP]
            # hairpinned packet: source translated, destination de-mapped
            self.assertEqual(ip.src, self.nat_addr)
            self.assertEqual(ip.dst, self.pg6.remote_ip4)
            self.assertNotEqual(tcp.sport, 1234)
            self.assertEqual(tcp.dport, 5678)
        except:
            # NOTE(review): bare "except:" also intercepts
            # SystemExit/KeyboardInterrupt; prefer "except Exception:"
            self.logger.error(ppp("Unexpected or invalid packet:", p))
            raise
        # in2out 1st interface
        pkts = self.create_stream_in(self.pg4, self.pg3)
        self.pg4.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg3.get_capture(len(pkts))
        self.verify_capture_out(capture)
        # out2in 1st interface
        pkts = self.create_stream_out(self.pg3)
        self.pg3.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg4.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg4)
        # in2out 2nd interface
        pkts = self.create_stream_in(self.pg5, self.pg3)
        self.pg5.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg3.get_capture(len(pkts))
        self.verify_capture_out(capture)
        # out2in 2nd interface
        pkts = self.create_stream_out(self.pg3)
        self.pg3.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg5.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg5)
        # pg5 session dump: 3 dynamic sessions (TCP/UDP/ICMP) in VRF 10
        addresses = self.vapi.nat44_address_dump()
        self.assertEqual(len(addresses), 1)
        sessions = self.vapi.nat44_user_session_dump(self.pg5.remote_ip4, 10)
        self.assertEqual(len(sessions), 3)
        for session in sessions:
            self.assertFalse(session.flags & self.config_flags.NAT_IS_STATIC)
            self.assertEqual(str(session.inside_ip_address),
                             self.pg5.remote_ip4)
            self.assertEqual(session.outside_ip_address,
                             addresses[0].ip_address)
        self.assertEqual(sessions[0].protocol, IP_PROTOS.tcp)
        self.assertEqual(sessions[1].protocol, IP_PROTOS.udp)
        self.assertEqual(sessions[2].protocol, IP_PROTOS.icmp)
        self.assertEqual(sessions[0].inside_port, self.tcp_port_in)
        self.assertEqual(sessions[1].inside_port, self.udp_port_in)
        self.assertEqual(sessions[2].inside_port, self.icmp_id_in)
        self.assertEqual(sessions[0].outside_port, self.tcp_port_out)
        self.assertEqual(sessions[1].outside_port, self.udp_port_out)
        self.assertEqual(sessions[2].outside_port, self.icmp_id_out)
        # in2out 3rd interface (static mapping in VRF 20)
        pkts = self.create_stream_in(self.pg6, self.pg3)
        self.pg6.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg3.get_capture(len(pkts))
        self.verify_capture_out(capture, static_nat_ip, True)
        # out2in 3rd interface
        pkts = self.create_stream_out(self.pg3, static_nat_ip)
        self.pg3.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg6.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg6)
        # general user and session dump verifications
        users = self.vapi.nat44_user_dump()
        self.assertGreaterEqual(len(users), 3)
        addresses = self.vapi.nat44_address_dump()
        self.assertEqual(len(addresses), 1)
        for user in users:
            sessions = self.vapi.nat44_user_session_dump(user.ip_address,
                                                         user.vrf_id)
            for session in sessions:
                self.assertEqual(user.ip_address, session.inside_ip_address)
                self.assertTrue(session.total_bytes > session.total_pkts > 0)
                self.assertTrue(session.protocol in
                                [IP_PROTOS.tcp, IP_PROTOS.udp,
                                 IP_PROTOS.icmp])
                self.assertFalse(session.flags &
                                 self.config_flags.NAT_IS_EXT_HOST_VALID)
        # pg4 session dump: 3 dynamic sessions plus the hairpinning one
        sessions = self.vapi.nat44_user_session_dump(self.pg4.remote_ip4, 10)
        self.assertGreaterEqual(len(sessions), 4)
        for session in sessions:
            self.assertFalse(session.flags & self.config_flags.NAT_IS_STATIC)
            self.assertEqual(str(session.inside_ip_address),
                             self.pg4.remote_ip4)
            self.assertEqual(session.outside_ip_address,
                             addresses[0].ip_address)
        # pg6 session dump: static sessions against static_nat_ip
        sessions = self.vapi.nat44_user_session_dump(self.pg6.remote_ip4, 20)
        self.assertGreaterEqual(len(sessions), 3)
        for session in sessions:
            self.assertTrue(session.flags & self.config_flags.NAT_IS_STATIC)
            self.assertEqual(str(session.inside_ip_address),
                             self.pg6.remote_ip4)
            self.assertEqual(str(session.outside_ip_address),
                             static_nat_ip)
            self.assertTrue(session.inside_port in
                            [self.tcp_port_in, self.udp_port_in,
                             self.icmp_id_in])
def test_hairpinning(self):
    """ NAT44EI hairpinning - 1:1 NAPT """
    host = self.pg0.remote_hosts[0]
    server = self.pg0.remote_hosts[1]
    host_in_port = 1234
    # placeholder; the real translated port is learned from the first capture
    host_out_port = 0
    server_in_port = 5678
    server_out_port = 8765
    self.nat44_add_address(self.nat_addr)
    flags = self.config_flags.NAT_IS_INSIDE
    # pg0 is inside, pg1 is outside; both host and server live on pg0 so
    # host->server traffic addressed to the NAT address must hairpin
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    # add static mapping for server
    self.nat44_add_static_mapping(server.ip4, self.nat_addr,
                                  server_in_port, server_out_port,
                                  proto=IP_PROTOS.tcp)
    # snapshot hairpinning counters so the deltas can be verified below
    cnt = self.statistics.get_counter('/nat44/hairpinning')[0]
    # send packet from host to server
    p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
         IP(src=host.ip4, dst=self.nat_addr) /
         TCP(sport=host_in_port, dport=server_out_port))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg0.get_capture(1)
    p = capture[0]
    try:
        ip = p[IP]
        tcp = p[TCP]
        # source must be SNATed to the pool address, destination DNATed
        # to the server's real address/port
        self.assertEqual(ip.src, self.nat_addr)
        self.assertEqual(ip.dst, server.ip4)
        self.assertNotEqual(tcp.sport, host_in_port)
        self.assertEqual(tcp.dport, server_in_port)
        self.assert_packet_checksums_valid(p)
        # remember the dynamically allocated outside port for the reply
        host_out_port = tcp.sport
    except:
        self.logger.error(ppp("Unexpected or invalid packet:", p))
        raise
    after = self.statistics.get_counter('/nat44/hairpinning')[0]
    if_idx = self.pg0.sw_if_index
    # exactly one packet hairpinned so far
    self.assertEqual(after[if_idx] - cnt[if_idx], 1)
    # send reply from server to host
    p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
         IP(src=server.ip4, dst=self.nat_addr) /
         TCP(sport=server_in_port, dport=host_out_port))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg0.get_capture(1)
    p = capture[0]
    try:
        ip = p[IP]
        tcp = p[TCP]
        # reply is rewritten back: static-mapped source, host's real dst
        self.assertEqual(ip.src, self.nat_addr)
        self.assertEqual(ip.dst, host.ip4)
        self.assertEqual(tcp.sport, server_out_port)
        self.assertEqual(tcp.dport, host_in_port)
        self.assert_packet_checksums_valid(p)
    except:
        self.logger.error(ppp("Unexpected or invalid packet:", p))
        raise
    after = self.statistics.get_counter('/nat44/hairpinning')[0]
    if_idx = self.pg0.sw_if_index
    # both directions hairpinned -> counter advanced by 2 total
    self.assertEqual(after[if_idx] - cnt[if_idx], 2)
def test_hairpinning2(self):
    """ NAT44EI hairpinning - 1:1 NAT"""
    server1_nat_ip = "10.0.0.10"
    server2_nat_ip = "10.0.0.11"
    host = self.pg0.remote_hosts[0]
    server1 = self.pg0.remote_hosts[1]
    server2 = self.pg0.remote_hosts[2]
    server_tcp_port = 22
    server_udp_port = 20
    self.nat44_add_address(self.nat_addr)
    flags = self.config_flags.NAT_IS_INSIDE
    # pg0 inside, pg1 outside; all three endpoints sit on pg0 so every
    # flow addressed to a NAT address must hairpin back out of pg0
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    # add static mapping for servers
    self.nat44_add_static_mapping(server1.ip4, server1_nat_ip)
    self.nat44_add_static_mapping(server2.ip4, server2_nat_ip)
    # host to server1
    pkts = []
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=host.ip4, dst=server1_nat_ip) /
         TCP(sport=self.tcp_port_in, dport=server_tcp_port))
    pkts.append(p)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=host.ip4, dst=server1_nat_ip) /
         UDP(sport=self.udp_port_in, dport=server_udp_port))
    pkts.append(p)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=host.ip4, dst=server1_nat_ip) /
         ICMP(id=self.icmp_id_in, type='echo-request'))
    pkts.append(p)
    self.pg0.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg0.get_capture(len(pkts))
    for packet in capture:
        try:
            # host side is dynamically SNATed to the pool address;
            # server side is statically DNATed back to its real address
            self.assertEqual(packet[IP].src, self.nat_addr)
            self.assertEqual(packet[IP].dst, server1.ip4)
            if packet.haslayer(TCP):
                self.assertNotEqual(packet[TCP].sport, self.tcp_port_in)
                self.assertEqual(packet[TCP].dport, server_tcp_port)
                # remember translated ports on self for the reply stream
                self.tcp_port_out = packet[TCP].sport
                self.assert_packet_checksums_valid(packet)
            elif packet.haslayer(UDP):
                self.assertNotEqual(packet[UDP].sport, self.udp_port_in)
                self.assertEqual(packet[UDP].dport, server_udp_port)
                self.udp_port_out = packet[UDP].sport
            else:
                self.assertNotEqual(packet[ICMP].id, self.icmp_id_in)
                self.icmp_id_out = packet[ICMP].id
        except:
            self.logger.error(ppp("Unexpected or invalid packet:", packet))
            raise
    # server1 to host
    pkts = []
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server1.ip4, dst=self.nat_addr) /
         TCP(sport=server_tcp_port, dport=self.tcp_port_out))
    pkts.append(p)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server1.ip4, dst=self.nat_addr) /
         UDP(sport=server_udp_port, dport=self.udp_port_out))
    pkts.append(p)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server1.ip4, dst=self.nat_addr) /
         ICMP(id=self.icmp_id_out, type='echo-reply'))
    pkts.append(p)
    self.pg0.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg0.get_capture(len(pkts))
    for packet in capture:
        try:
            # replies must be rewritten back to the original inside values
            self.assertEqual(packet[IP].src, server1_nat_ip)
            self.assertEqual(packet[IP].dst, host.ip4)
            if packet.haslayer(TCP):
                self.assertEqual(packet[TCP].dport, self.tcp_port_in)
                self.assertEqual(packet[TCP].sport, server_tcp_port)
                self.assert_packet_checksums_valid(packet)
            elif packet.haslayer(UDP):
                self.assertEqual(packet[UDP].dport, self.udp_port_in)
                self.assertEqual(packet[UDP].sport, server_udp_port)
            else:
                self.assertEqual(packet[ICMP].id, self.icmp_id_in)
        except:
            self.logger.error(ppp("Unexpected or invalid packet:", packet))
            raise
    # server2 to server1
    pkts = []
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server2.ip4, dst=server1_nat_ip) /
         TCP(sport=self.tcp_port_in, dport=server_tcp_port))
    pkts.append(p)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server2.ip4, dst=server1_nat_ip) /
         UDP(sport=self.udp_port_in, dport=server_udp_port))
    pkts.append(p)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server2.ip4, dst=server1_nat_ip) /
         ICMP(id=self.icmp_id_in, type='echo-request'))
    pkts.append(p)
    self.pg0.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg0.get_capture(len(pkts))
    for packet in capture:
        try:
            # server2 has a 1:1 (address-only) mapping, so ports/ids are
            # preserved (assertEqual here, unlike the dynamic host case)
            self.assertEqual(packet[IP].src, server2_nat_ip)
            self.assertEqual(packet[IP].dst, server1.ip4)
            if packet.haslayer(TCP):
                self.assertEqual(packet[TCP].sport, self.tcp_port_in)
                self.assertEqual(packet[TCP].dport, server_tcp_port)
                self.tcp_port_out = packet[TCP].sport
                self.assert_packet_checksums_valid(packet)
            elif packet.haslayer(UDP):
                self.assertEqual(packet[UDP].sport, self.udp_port_in)
                self.assertEqual(packet[UDP].dport, server_udp_port)
                self.udp_port_out = packet[UDP].sport
            else:
                self.assertEqual(packet[ICMP].id, self.icmp_id_in)
                self.icmp_id_out = packet[ICMP].id
        except:
            self.logger.error(ppp("Unexpected or invalid packet:", packet))
            raise
    # server1 to server2
    pkts = []
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server1.ip4, dst=server2_nat_ip) /
         TCP(sport=server_tcp_port, dport=self.tcp_port_out))
    pkts.append(p)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server1.ip4, dst=server2_nat_ip) /
         UDP(sport=server_udp_port, dport=self.udp_port_out))
    pkts.append(p)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=server1.ip4, dst=server2_nat_ip) /
         ICMP(id=self.icmp_id_out, type='echo-reply'))
    pkts.append(p)
    self.pg0.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg0.get_capture(len(pkts))
    for packet in capture:
        try:
            self.assertEqual(packet[IP].src, server1_nat_ip)
            self.assertEqual(packet[IP].dst, server2.ip4)
            if packet.haslayer(TCP):
                self.assertEqual(packet[TCP].dport, self.tcp_port_in)
                self.assertEqual(packet[TCP].sport, server_tcp_port)
                self.assert_packet_checksums_valid(packet)
            elif packet.haslayer(UDP):
                self.assertEqual(packet[UDP].dport, self.udp_port_in)
                self.assertEqual(packet[UDP].sport, server_udp_port)
            else:
                self.assertEqual(packet[ICMP].id, self.icmp_id_in)
        except:
            self.logger.error(ppp("Unexpected or invalid packet:", packet))
            raise
def test_hairpinning_avoid_inf_loop(self):
    """ NAT44EI hairpinning - 1:1 NAPT avoid infinite loop """
    host = self.pg0.remote_hosts[0]
    server = self.pg0.remote_hosts[1]
    host_in_port = 1234
    # placeholder; learned from the first translated capture below
    host_out_port = 0
    server_in_port = 5678
    server_out_port = 8765
    self.nat44_add_address(self.nat_addr)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    # add static mapping for server
    self.nat44_add_static_mapping(server.ip4, self.nat_addr,
                                  server_in_port, server_out_port,
                                  proto=IP_PROTOS.tcp)
    # add another static mapping that maps pg0.local_ip4 address to itself
    self.nat44_add_static_mapping(self.pg0.local_ip4, self.pg0.local_ip4)
    # send packet from host to VPP (the packet should get dropped)
    p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
         IP(src=host.ip4, dst=self.pg0.local_ip4) /
         TCP(sport=host_in_port, dport=server_out_port))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    # Here VPP used to crash due to an infinite loop
    # (the self-mapping above would hairpin the packet back to itself);
    # the remainder verifies normal hairpinning still works afterwards
    cnt = self.statistics.get_counter('/nat44/hairpinning')[0]
    # send packet from host to server
    p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
         IP(src=host.ip4, dst=self.nat_addr) /
         TCP(sport=host_in_port, dport=server_out_port))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg0.get_capture(1)
    p = capture[0]
    try:
        ip = p[IP]
        tcp = p[TCP]
        self.assertEqual(ip.src, self.nat_addr)
        self.assertEqual(ip.dst, server.ip4)
        self.assertNotEqual(tcp.sport, host_in_port)
        self.assertEqual(tcp.dport, server_in_port)
        self.assert_packet_checksums_valid(p)
        # remember the dynamically allocated outside port for the reply
        host_out_port = tcp.sport
    except:
        self.logger.error(ppp("Unexpected or invalid packet:", p))
        raise
    after = self.statistics.get_counter('/nat44/hairpinning')[0]
    if_idx = self.pg0.sw_if_index
    self.assertEqual(after[if_idx] - cnt[if_idx], 1)
    # send reply from server to host
    p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
         IP(src=server.ip4, dst=self.nat_addr) /
         TCP(sport=server_in_port, dport=host_out_port))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg0.get_capture(1)
    p = capture[0]
    try:
        ip = p[IP]
        tcp = p[TCP]
        self.assertEqual(ip.src, self.nat_addr)
        self.assertEqual(ip.dst, host.ip4)
        self.assertEqual(tcp.sport, server_out_port)
        self.assertEqual(tcp.dport, host_in_port)
        self.assert_packet_checksums_valid(p)
    except:
        self.logger.error(ppp("Unexpected or invalid packet:", p))
        raise
    after = self.statistics.get_counter('/nat44/hairpinning')[0]
    if_idx = self.pg0.sw_if_index
    self.assertEqual(after[if_idx] - cnt[if_idx], 2)
def test_interface_addr(self):
    """ NAT44EI acquire addresses from interface """
    # register pg7 as an address-pool source before it has any IP
    self.vapi.nat44_add_del_interface_addr(
        is_add=1,
        sw_if_index=self.pg7.sw_if_index)
    # pool must be empty while the interface is unnumbered
    self.assertEqual(0, len(self.vapi.nat44_address_dump()))
    # assigning an address to pg7 should populate the pool with it
    self.pg7.config_ip4()
    pool = self.vapi.nat44_address_dump()
    self.assertEqual(1, len(pool))
    self.assertEqual(str(pool[0].ip_address), self.pg7.local_ip4)
    # removing the interface address should empty the pool again
    self.pg7.unconfig_ip4()
    self.assertEqual(0, len(self.vapi.nat44_address_dump()))
def test_interface_addr_static_mapping(self):
    """ NAT44EI Static mapping with addresses from interface """
    tag = "testTAG"
    self.vapi.nat44_add_del_interface_addr(
        is_add=1,
        sw_if_index=self.pg7.sw_if_index)
    # mapping references the interface; external address resolves later
    self.nat44_add_static_mapping(
        '1.2.3.4',
        external_sw_if_index=self.pg7.sw_if_index,
        tag=tag)
    # static mappings with external interface
    static_mappings = self.vapi.nat44_static_mapping_dump()
    self.assertEqual(1, len(static_mappings))
    self.assertEqual(self.pg7.sw_if_index,
                     static_mappings[0].external_sw_if_index)
    self.assertEqual(static_mappings[0].tag, tag)
    # configure interface address and check static mappings
    self.pg7.config_ip4()
    static_mappings = self.vapi.nat44_static_mapping_dump()
    # now two entries: the interface-bound one plus the resolved one
    self.assertEqual(2, len(static_mappings))
    resolved = False
    for sm in static_mappings:
        # 0xFFFFFFFF (~0) sw_if_index marks the resolved (address) entry
        if sm.external_sw_if_index == 0xFFFFFFFF:
            self.assertEqual(str(sm.external_ip_address),
                             self.pg7.local_ip4)
            self.assertEqual(sm.tag, tag)
            resolved = True
    self.assertTrue(resolved)
    # remove interface address and check static mappings
    self.pg7.unconfig_ip4()
    static_mappings = self.vapi.nat44_static_mapping_dump()
    self.assertEqual(1, len(static_mappings))
    self.assertEqual(self.pg7.sw_if_index,
                     static_mappings[0].external_sw_if_index)
    self.assertEqual(static_mappings[0].tag, tag)
    # configure interface address again and check static mappings
    self.pg7.config_ip4()
    static_mappings = self.vapi.nat44_static_mapping_dump()
    self.assertEqual(2, len(static_mappings))
    resolved = False
    for sm in static_mappings:
        if sm.external_sw_if_index == 0xFFFFFFFF:
            self.assertEqual(str(sm.external_ip_address),
                             self.pg7.local_ip4)
            self.assertEqual(sm.tag, tag)
            resolved = True
    self.assertTrue(resolved)
    # remove static mapping
    self.nat44_add_static_mapping(
        '1.2.3.4',
        external_sw_if_index=self.pg7.sw_if_index,
        tag=tag,
        is_add=0)
    # deleting the interface-bound mapping removes the resolved one too
    static_mappings = self.vapi.nat44_static_mapping_dump()
    self.assertEqual(0, len(static_mappings))
def test_interface_addr_identity_nat(self):
    """ NAT44EI Identity NAT with addresses from interface """
    port = 53053
    self.vapi.nat44_add_del_interface_addr(
        is_add=1,
        sw_if_index=self.pg7.sw_if_index)
    self.vapi.nat44_add_del_identity_mapping(
        ip_address=b'0',
        sw_if_index=self.pg7.sw_if_index,
        port=port,
        protocol=IP_PROTOS.tcp,
        is_add=1)
    # identity mappings with external interface
    identity_mappings = self.vapi.nat44_identity_mapping_dump()
    self.assertEqual(1, len(identity_mappings))
    self.assertEqual(self.pg7.sw_if_index,
                     identity_mappings[0].sw_if_index)
    # configure interface address and check identity mappings
    self.pg7.config_ip4()
    identity_mappings = self.vapi.nat44_identity_mapping_dump()
    resolved = False
    # two entries now: the interface-bound one plus the resolved one
    self.assertEqual(2, len(identity_mappings))
    for sm in identity_mappings:
        # 0xFFFFFFFF (~0) sw_if_index marks the resolved (address) entry
        if sm.sw_if_index == 0xFFFFFFFF:
            # fix: verify the matched entry `sm`, not identity_mappings[0]
            # (index 0 may be the unresolved interface-bound entry, so the
            # old code could assert against the wrong record)
            self.assertEqual(str(sm.ip_address), self.pg7.local_ip4)
            self.assertEqual(port, sm.port)
            self.assertEqual(IP_PROTOS.tcp, sm.protocol)
            resolved = True
    self.assertTrue(resolved)
    # remove interface address and check identity mappings
    self.pg7.unconfig_ip4()
    identity_mappings = self.vapi.nat44_identity_mapping_dump()
    self.assertEqual(1, len(identity_mappings))
    self.assertEqual(self.pg7.sw_if_index,
                     identity_mappings[0].sw_if_index)
def test_ipfix_nat44_sess(self):
    """ NAT44EI IPFIX logging NAT44EI session created/deleted """
    self.ipfix_domain_id = 10
    self.ipfix_src_port = 20202
    collector_port = 30303
    # teach scapy to decode IPFIX on the non-default collector port
    bind_layers(UDP, IPFIX, dport=30303)
    self.nat44_add_address(self.nat_addr)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    # pg3 acts as the IPFIX collector
    self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4,
                                 src_address=self.pg3.local_ip4,
                                 path_mtu=512,
                                 template_interval=10,
                                 collector_port=collector_port)
    self.vapi.nat_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
                                       src_port=self.ipfix_src_port,
                                       enable=1)
    pkts = self.create_stream_in(self.pg0, self.pg1)
    self.pg0.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg1.get_capture(len(pkts))
    self.verify_capture_out(capture)
    # removing the pool address tears down the sessions, which should
    # generate session-deleted IPFIX events
    self.nat44_add_address(self.nat_addr, is_add=0)
    self.vapi.ipfix_flush()
    capture = self.pg3.get_capture(7)
    ipfix = IPFIXDecoder()
    # first load template
    for p in capture:
        self.assertTrue(p.haslayer(IPFIX))
        self.assertEqual(p[IP].src, self.pg3.local_ip4)
        self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
        self.assertEqual(p[UDP].sport, self.ipfix_src_port)
        self.assertEqual(p[UDP].dport, collector_port)
        self.assertEqual(p[IPFIX].observationDomainID,
                         self.ipfix_domain_id)
        if p.haslayer(Template):
            ipfix.add_template(p.getlayer(Template))
    # verify events in data set
    for p in capture:
        if p.haslayer(Data):
            data = ipfix.decode_data_set(p.getlayer(Set))
            self.verify_ipfix_nat44_ses(data)
def test_ipfix_addr_exhausted(self):
    """ NAT44EI IPFIX logging NAT addresses exhausted """
    # note: no pool address is configured, so any in2out packet
    # immediately exhausts the (empty) address pool
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4,
                                 src_address=self.pg3.local_ip4,
                                 path_mtu=512,
                                 template_interval=10)
    self.vapi.nat_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
                                       src_port=self.ipfix_src_port,
                                       enable=1)
    p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
         IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
         TCP(sport=3025))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    # translation fails: nothing is forwarded to the outside interface
    self.pg1.assert_nothing_captured()
    sleep(1)
    self.vapi.ipfix_flush()
    capture = self.pg3.get_capture(7)
    ipfix = IPFIXDecoder()
    # first load template
    for p in capture:
        self.assertTrue(p.haslayer(IPFIX))
        self.assertEqual(p[IP].src, self.pg3.local_ip4)
        self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
        self.assertEqual(p[UDP].sport, self.ipfix_src_port)
        # 4739 is the default IPFIX collector port (no override here)
        self.assertEqual(p[UDP].dport, 4739)
        self.assertEqual(p[IPFIX].observationDomainID,
                         self.ipfix_domain_id)
        if p.haslayer(Template):
            ipfix.add_template(p.getlayer(Template))
    # verify events in data set
    for p in capture:
        if p.haslayer(Data):
            data = ipfix.decode_data_set(p.getlayer(Set))
            self.verify_ipfix_addr_exhausted(data)
def test_ipfix_max_sessions(self):
    """ NAT44EI IPFIX logging maximum session entries exceeded """
    self.nat44_add_address(self.nat_addr)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    max_sessions = self.max_translations
    # fill the session table to capacity, one source IP per session
    pkts = []
    for i in range(0, max_sessions):
        src = "10.10.%u.%u" % ((i & 0xFF00) >> 8, i & 0xFF)
        p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
             IP(src=src, dst=self.pg1.remote_ip4) /
             TCP(sport=1025))
        pkts.append(p)
    self.pg0.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    self.pg1.get_capture(max_sessions)
    self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4,
                                 src_address=self.pg3.local_ip4,
                                 path_mtu=512,
                                 template_interval=10)
    self.vapi.nat_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
                                       src_port=self.ipfix_src_port,
                                       enable=1)
    # one more flow from a new source must exceed the session limit
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
         TCP(sport=1025))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    self.pg1.assert_nothing_captured()
    sleep(1)
    self.vapi.ipfix_flush()
    capture = self.pg3.get_capture(7)
    ipfix = IPFIXDecoder()
    # first load template
    for p in capture:
        self.assertTrue(p.haslayer(IPFIX))
        self.assertEqual(p[IP].src, self.pg3.local_ip4)
        self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
        self.assertEqual(p[UDP].sport, self.ipfix_src_port)
        # 4739 is the default IPFIX collector port (no override here)
        self.assertEqual(p[UDP].dport, 4739)
        self.assertEqual(p[IPFIX].observationDomainID,
                         self.ipfix_domain_id)
        if p.haslayer(Template):
            ipfix.add_template(p.getlayer(Template))
    # verify events in data set
    for p in capture:
        if p.haslayer(Data):
            data = ipfix.decode_data_set(p.getlayer(Set))
            self.verify_ipfix_max_sessions(data, max_sessions)
def test_syslog_apmap(self):
    """ NAT44EI syslog address and port mapping creation and deletion """
    self.vapi.syslog_set_filter(
        self.SYSLOG_SEVERITY.SYSLOG_API_SEVERITY_INFO)
    # syslog messages are sent out of pg3
    self.vapi.syslog_set_sender(self.pg3.local_ip4, self.pg3.remote_ip4)
    self.nat44_add_address(self.nat_addr)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
         TCP(sport=self.tcp_port_in, dport=20))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg1.get_capture(1)
    # learn the allocated outside port; verify_syslog_apmap checks it
    self.tcp_port_out = capture[0][TCP].sport
    # creating the mapping must emit one syslog message
    capture = self.pg3.get_capture(1)
    self.verify_syslog_apmap(capture[0][Raw].load)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    # removing the pool address deletes the mapping -> deletion message
    self.nat44_add_address(self.nat_addr, is_add=0)
    capture = self.pg3.get_capture(1)
    self.verify_syslog_apmap(capture[0][Raw].load, False)
def test_pool_addr_fib(self):
    """ NAT44EI add pool addresses to FIB """
    static_addr = '10.0.0.10'
    self.nat44_add_address(self.nat_addr)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    self.nat44_add_static_mapping(self.pg0.remote_ip4, static_addr)
    # NAT44EI address
    # VPP must answer ARP for the pool address on the outside interface
    p = (Ether(src=self.pg1.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
         ARP(op=ARP.who_has, pdst=self.nat_addr,
             psrc=self.pg1.remote_ip4, hwsrc=self.pg1.remote_mac))
    self.pg1.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg1.get_capture(1)
    self.assertTrue(capture[0].haslayer(ARP))
    # fix: assertTrue(x, y) treats y as the failure message and never
    # compares it -- use assertEqual to actually check the ARP opcode
    self.assertEqual(capture[0][ARP].op, ARP.is_at)
    # 1:1 NAT address
    p = (Ether(src=self.pg1.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
         ARP(op=ARP.who_has, pdst=static_addr,
             psrc=self.pg1.remote_ip4, hwsrc=self.pg1.remote_mac))
    self.pg1.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg1.get_capture(1)
    self.assertTrue(capture[0].haslayer(ARP))
    self.assertEqual(capture[0][ARP].op, ARP.is_at)
    # send ARP to non-NAT44EI interface
    p = (Ether(src=self.pg2.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
         ARP(op=ARP.who_has, pdst=self.nat_addr,
             psrc=self.pg2.remote_ip4, hwsrc=self.pg2.remote_mac))
    self.pg2.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    self.pg1.assert_nothing_captured()
    # remove addresses and verify they are withdrawn from the FIB
    self.nat44_add_address(self.nat_addr, is_add=0)
    self.nat44_add_static_mapping(self.pg0.remote_ip4, static_addr,
                                  is_add=0)
    p = (Ether(src=self.pg1.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
         ARP(op=ARP.who_has, pdst=self.nat_addr,
             psrc=self.pg1.remote_ip4, hwsrc=self.pg1.remote_mac))
    self.pg1.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    self.pg1.assert_nothing_captured()
    p = (Ether(src=self.pg1.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
         ARP(op=ARP.who_has, pdst=static_addr,
             psrc=self.pg1.remote_ip4, hwsrc=self.pg1.remote_mac))
    self.pg1.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    self.pg1.assert_nothing_captured()
def test_vrf_mode(self):
    """ NAT44EI tenant VRF aware address pool mode """
    vrf_id1 = 1
    vrf_id2 = 2
    nat_ip1 = "10.0.0.10"
    nat_ip2 = "10.0.0.11"
    # move pg0/pg1 into separate VRFs (addresses must be removed first)
    self.pg0.unconfig_ip4()
    self.pg1.unconfig_ip4()
    self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id1})
    self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id2})
    self.pg0.set_table_ip4(vrf_id1)
    self.pg1.set_table_ip4(vrf_id2)
    self.pg0.config_ip4()
    self.pg1.config_ip4()
    self.pg0.resolve_arp()
    self.pg1.resolve_arp()
    # one pool address per tenant VRF
    self.nat44_add_address(nat_ip1, vrf_id=vrf_id1)
    self.nat44_add_address(nat_ip2, vrf_id=vrf_id2)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg2.sw_if_index,
        is_add=1)
    try:
        # first VRF: traffic from pg0 must use nat_ip1
        pkts = self.create_stream_in(self.pg0, self.pg2)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg2.get_capture(len(pkts))
        self.verify_capture_out(capture, nat_ip1)
        # second VRF: traffic from pg1 must use nat_ip2
        pkts = self.create_stream_in(self.pg1, self.pg2)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg2.get_capture(len(pkts))
        self.verify_capture_out(capture, nat_ip2)
    finally:
        # always restore pg0/pg1 to the default table so later tests
        # are unaffected, even if the verification above failed
        self.pg0.unconfig_ip4()
        self.pg1.unconfig_ip4()
        self.pg0.set_table_ip4(0)
        self.pg1.set_table_ip4(0)
        self.pg0.config_ip4()
        self.pg1.config_ip4()
        self.pg0.resolve_arp()
        self.pg1.resolve_arp()
        self.vapi.ip_table_add_del(is_add=0, table={'table_id': vrf_id1})
        self.vapi.ip_table_add_del(is_add=0, table={'table_id': vrf_id2})
def test_vrf_feature_independent(self):
    """ NAT44EI tenant VRF independent address pool mode """
    nat_ip1 = "10.0.0.10"
    nat_ip2 = "10.0.0.11"
    # nat_ip1 goes to the default pool; nat_ip2 is tied to VRF 99,
    # which no interface here belongs to
    self.nat44_add_address(nat_ip1)
    self.nat44_add_address(nat_ip2, vrf_id=99)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg2.sw_if_index,
        is_add=1)
    # first VRF
    pkts = self.create_stream_in(self.pg0, self.pg2)
    self.pg0.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg2.get_capture(len(pkts))
    self.verify_capture_out(capture, nat_ip1)
    # second VRF
    pkts = self.create_stream_in(self.pg1, self.pg2)
    self.pg1.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg2.get_capture(len(pkts))
    # intentionally nat_ip1 again: in VRF-independent mode both
    # interfaces draw from the same (first) pool address
    self.verify_capture_out(capture, nat_ip1)
def test_dynamic_ipless_interfaces(self):
    """ NAT44EI interfaces without configured IP address """
    self.create_routes_and_neigbors()
    self.nat44_add_address(self.nat_addr)
    inside = self.config_flags.NAT_IS_INSIDE
    # pg7 = inside, pg8 = outside; neither carries an IP address
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg7.sw_if_index,
        flags=inside, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg8.sw_if_index,
        is_add=1)
    # in2out direction
    stream = self.create_stream_in(self.pg7, self.pg8)
    self.pg7.add_stream(stream)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    self.verify_capture_out(self.pg8.get_capture(len(stream)))
    # out2in direction
    stream = self.create_stream_out(self.pg8, self.nat_addr)
    self.pg8.add_stream(stream)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    self.verify_capture_in(self.pg7.get_capture(len(stream)), self.pg7)
def test_static_ipless_interfaces(self):
    """ NAT44EI interfaces without configured IP address - 1:1 NAT """
    self.create_routes_and_neigbors()
    # address-only static mapping; no dynamic pool configured
    self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg7.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg8.sw_if_index,
        is_add=1)
    # out2in (static mapping allows outside-initiated traffic first)
    pkts = self.create_stream_out(self.pg8)
    self.pg8.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg7.get_capture(len(pkts))
    self.verify_capture_in(capture, self.pg7)
    # in2out
    pkts = self.create_stream_in(self.pg7, self.pg8)
    self.pg7.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg8.get_capture(len(pkts))
    # ports unchanged by the 1:1 (address-only) mapping
    self.verify_capture_out(capture, self.nat_addr, True)
def test_static_with_port_ipless_interfaces(self):
    """ NAT44EI interfaces without configured IP address - 1:1 NAPT """
    # fixed outside ports/ids used by the per-protocol static mappings
    self.tcp_port_out = 30606
    self.udp_port_out = 30607
    self.icmp_id_out = 30608
    self.create_routes_and_neigbors()
    self.nat44_add_address(self.nat_addr)
    # one static NAPT mapping per protocol
    self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr,
                                  self.tcp_port_in, self.tcp_port_out,
                                  proto=IP_PROTOS.tcp)
    self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr,
                                  self.udp_port_in, self.udp_port_out,
                                  proto=IP_PROTOS.udp)
    self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr,
                                  self.icmp_id_in, self.icmp_id_out,
                                  proto=IP_PROTOS.icmp)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg7.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg8.sw_if_index,
        is_add=1)
    # out2in
    pkts = self.create_stream_out(self.pg8)
    self.pg8.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg7.get_capture(len(pkts))
    self.verify_capture_in(capture, self.pg7)
    # in2out
    pkts = self.create_stream_in(self.pg7, self.pg8)
    self.pg7.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    capture = self.pg8.get_capture(len(pkts))
    self.verify_capture_out(capture)
def test_static_unknown_proto(self):
    """ NAT44EI 1:1 translate packet with unknown protocol """
    nat_ip = "10.0.0.10"
    self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    # in2out
    # GRE is "unknown" to NAT44 (not TCP/UDP/ICMP); only the outer IP
    # addresses can be translated via the 1:1 mapping
    p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
         IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
         GRE() /
         IP(src=self.pg2.remote_ip4, dst=self.pg3.remote_ip4) /
         TCP(sport=1234, dport=1234))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    p = self.pg1.get_capture(1)
    packet = p[0]
    try:
        self.assertEqual(packet[IP].src, nat_ip)
        self.assertEqual(packet[IP].dst, self.pg1.remote_ip4)
        # inner GRE payload must pass through untouched
        self.assertEqual(packet.haslayer(GRE), 1)
        self.assert_packet_checksums_valid(packet)
    except:
        self.logger.error(ppp("Unexpected or invalid packet:", packet))
        raise
    # out2in
    p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
         IP(src=self.pg1.remote_ip4, dst=nat_ip) /
         GRE() /
         IP(src=self.pg3.remote_ip4, dst=self.pg2.remote_ip4) /
         TCP(sport=1234, dport=1234))
    self.pg1.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    p = self.pg0.get_capture(1)
    packet = p[0]
    try:
        self.assertEqual(packet[IP].src, self.pg1.remote_ip4)
        self.assertEqual(packet[IP].dst, self.pg0.remote_ip4)
        self.assertEqual(packet.haslayer(GRE), 1)
        self.assert_packet_checksums_valid(packet)
    except:
        self.logger.error(ppp("Unexpected or invalid packet:", packet))
        raise
def test_hairpinning_static_unknown_proto(self):
    """ NAT44EI 1:1 translate packet with unknown protocol - hairpinning
    """
    host = self.pg0.remote_hosts[0]
    server = self.pg0.remote_hosts[1]
    host_nat_ip = "10.0.0.10"
    server_nat_ip = "10.0.0.11"
    # address-only 1:1 mappings for both endpoints on the inside net
    self.nat44_add_static_mapping(host.ip4, host_nat_ip)
    self.nat44_add_static_mapping(server.ip4, server_nat_ip)
    flags = self.config_flags.NAT_IS_INSIDE
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg0.sw_if_index,
        flags=flags, is_add=1)
    self.vapi.nat44_interface_add_del_feature(
        sw_if_index=self.pg1.sw_if_index,
        is_add=1)
    # host to server
    # GRE has no port info; hairpinned translation uses addresses only
    p = (Ether(dst=self.pg0.local_mac, src=host.mac) /
         IP(src=host.ip4, dst=server_nat_ip) /
         GRE() /
         IP(src=self.pg2.remote_ip4, dst=self.pg3.remote_ip4) /
         TCP(sport=1234, dport=1234))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    p = self.pg0.get_capture(1)
    packet = p[0]
    try:
        self.assertEqual(packet[IP].src, host_nat_ip)
        self.assertEqual(packet[IP].dst, server.ip4)
        self.assertEqual(packet.haslayer(GRE), 1)
        self.assert_packet_checksums_valid(packet)
    except:
        self.logger.error(ppp("Unexpected or invalid packet:", packet))
        raise
    # server to host
    p = (Ether(dst=self.pg0.local_mac, src=server.mac) /
         IP(src=server.ip4, dst=host_nat_ip) /
         GRE() /
         IP(src=self.pg3.remote_ip4, dst=self.pg2.remote_ip4) /
         TCP(sport=1234, dport=1234))
    self.pg0.add_stream(p)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
    p = self.pg0.get_capture(1)
    packet = p[0]
    try:
        self.assertEqual(packet[IP].src, server_nat_ip)
        self.assertEqual(packet[IP].dst, host.ip4)
        self.assertEqual(packet.haslayer(GRE), 1)
        self.assert_packet_checksums_valid(packet)
    except:
        self.logger.error(ppp("Unexpected or invalid packet:", packet))
        raise
def test_output_feature(self):
""" NAT44EI output feature (in2out postrouting) """
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_output_feature(
is_add=1, flags=flags,
sw_if_index=self.pg0.sw_if_index)
self.vapi.nat44_interface_add_del_output_feature(
is_add=1, flags=flags,
sw_if_index=self.pg1.sw_if_index)
self.vapi.nat44_interface_add_del_output_feature(
is_add=1,
sw_if_index=self.pg3.sw_if_index)
# in2out
pkts = self.create_stream_in(self.pg0, self.pg3)
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg3.get_capture(len(pkts))
self.verify_capture_out(capture)
# out2in
pkts = self.create_stream_out(self.pg3)
self.pg3.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(len(pkts))
self.verify_capture_in(capture, self.pg0)
# from non-NAT interface to NAT inside interface
pkts = self.create_stream_in(self.pg2, self.pg0)
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(len(pkts))
self.verify_capture_no_translation(capture, self.pg2, self.pg0)
def test_output_feature_vrf_aware(self):
""" NAT44EI output feature VRF aware (in2out postrouting) """
nat_ip_vrf10 = "10.0.0.10"
nat_ip_vrf20 = "10.0.0.20"
r1 = VppIpRoute(self, self.pg3.remote_ip4, 32,
[VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index)],
table_id=10)
r2 = VppIpRoute(self, self.pg3.remote_ip4, 32,
[VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index)],
table_id=20)
r1.add_vpp_config()
r2.add_vpp_config()
self.nat44_add_address(nat_ip_vrf10, vrf_id=10)
self.nat44_add_address(nat_ip_vrf20, vrf_id=20)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_output_feature(
is_add=1, flags=flags,
sw_if_index=self.pg4.sw_if_index)
self.vapi.nat44_interface_add_del_output_feature(
is_add=1, flags=flags,
sw_if_index=self.pg6.sw_if_index)
self.vapi.nat44_interface_add_del_output_feature(
is_add=1,
sw_if_index=self.pg3.sw_if_index)
# in2out VRF 10
pkts = self.create_stream_in(self.pg4, self.pg3)
self.pg4.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg3.get_capture(len(pkts))
self.verify_capture_out(capture, nat_ip=nat_ip_vrf10)
# out2in VRF 10
pkts = self.create_stream_out(self.pg3, dst_ip=nat_ip_vrf10)
self.pg3.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg4.get_capture(len(pkts))
self.verify_capture_in(capture, self.pg4)
# in2out VRF 20
pkts = self.create_stream_in(self.pg6, self.pg3)
self.pg6.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg3.get_capture(len(pkts))
self.verify_capture_out(capture, nat_ip=nat_ip_vrf20)
# out2in VRF 20
pkts = self.create_stream_out(self.pg3, dst_ip=nat_ip_vrf20)
self.pg3.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg6.get_capture(len(pkts))
self.verify_capture_in(capture, self.pg6)
def test_output_feature_hairpinning(self):
""" NAT44EI output feature hairpinning (in2out postrouting) """
host = self.pg0.remote_hosts[0]
server = self.pg0.remote_hosts[1]
host_in_port = 1234
host_out_port = 0
server_in_port = 5678
server_out_port = 8765
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_output_feature(
is_add=1, flags=flags,
sw_if_index=self.pg0.sw_if_index)
self.vapi.nat44_interface_add_del_output_feature(
is_add=1,
sw_if_index=self.pg1.sw_if_index)
# add static mapping for server
self.nat44_add_static_mapping(server.ip4, self.nat_addr,
server_in_port, server_out_port,
proto=IP_PROTOS.tcp)
# send packet from host to server
p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
IP(src=host.ip4, dst=self.nat_addr) /
TCP(sport=host_in_port, dport=server_out_port))
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(1)
p = capture[0]
try:
ip = p[IP]
tcp = p[TCP]
self.assertEqual(ip.src, self.nat_addr)
self.assertEqual(ip.dst, server.ip4)
self.assertNotEqual(tcp.sport, host_in_port)
self.assertEqual(tcp.dport, server_in_port)
self.assert_packet_checksums_valid(p)
host_out_port = tcp.sport
except:
self.logger.error(ppp("Unexpected or invalid packet:", p))
raise
# send reply from server to host
p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
IP(src=server.ip4, dst=self.nat_addr) /
TCP(sport=server_in_port, dport=host_out_port))
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(1)
p = capture[0]
try:
ip = p[IP]
tcp = p[TCP]
self.assertEqual(ip.src, self.nat_addr)
self.assertEqual(ip.dst, host.ip4)
self.assertEqual(tcp.sport, server_out_port)
self.assertEqual(tcp.dport, host_in_port)
self.assert_packet_checksums_valid(p)
except:
self.logger.error(ppp("Unexpected or invalid packet:", p))
raise
def test_one_armed_nat44(self):
""" NAT44EI One armed NAT """
remote_host = self.pg9.remote_hosts[0]
local_host = self.pg9.remote_hosts[1]
external_port = 0
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg9.sw_if_index,
is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg9.sw_if_index,
flags=flags, is_add=1)
# in2out
p = (Ether(src=self.pg9.remote_mac, dst=self.pg9.local_mac) /
IP(src=local_host.ip4, dst=remote_host.ip4) /
TCP(sport=12345, dport=80))
self.pg9.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg9.get_capture(1)
p = capture[0]
try:
ip = p[IP]
tcp = p[TCP]
self.assertEqual(ip.src, self.nat_addr)
self.assertEqual(ip.dst, remote_host.ip4)
self.assertNotEqual(tcp.sport, 12345)
external_port = tcp.sport
self.assertEqual(tcp.dport, 80)
self.assert_packet_checksums_valid(p)
except:
self.logger.error(ppp("Unexpected or invalid packet:", p))
raise
# out2in
p = (Ether(src=self.pg9.remote_mac, dst=self.pg9.local_mac) /
IP(src=remote_host.ip4, dst=self.nat_addr) /
TCP(sport=80, dport=external_port))
self.pg9.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg9.get_capture(1)
p = capture[0]
try:
ip = p[IP]
tcp = p[TCP]
self.assertEqual(ip.src, remote_host.ip4)
self.assertEqual(ip.dst, local_host.ip4)
self.assertEqual(tcp.sport, 80)
self.assertEqual(tcp.dport, 12345)
self.assert_packet_checksums_valid(p)
except:
self.logger.error(ppp("Unexpected or invalid packet:", p))
raise
err = self.statistics.get_err_counter(
'/err/nat44-classify/next in2out')
self.assertEqual(err, 1)
err = self.statistics.get_err_counter(
'/err/nat44-classify/next out2in')
self.assertEqual(err, 1)
def test_del_session(self):
""" NAT44EI delete session """
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
pkts = self.create_stream_in(self.pg0, self.pg1)
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.get_capture(len(pkts))
sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
nsessions = len(sessions)
self.vapi.nat44_del_session(address=sessions[0].inside_ip_address,
port=sessions[0].inside_port,
protocol=sessions[0].protocol,
flags=self.config_flags.NAT_IS_INSIDE)
self.vapi.nat44_del_session(address=sessions[1].outside_ip_address,
port=sessions[1].outside_port,
protocol=sessions[1].protocol)
sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
self.assertEqual(nsessions - len(sessions), 2)
self.vapi.nat44_del_session(address=sessions[0].inside_ip_address,
port=sessions[0].inside_port,
protocol=sessions[0].protocol,
flags=self.config_flags.NAT_IS_INSIDE)
self.verify_no_nat44_user()
def test_frag_in_order(self):
""" NAT44EI translate fragments arriving in order """
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
self.frag_in_order(proto=IP_PROTOS.tcp)
self.frag_in_order(proto=IP_PROTOS.udp)
self.frag_in_order(proto=IP_PROTOS.icmp)
def test_frag_forwarding(self):
""" NAT44EI forwarding fragment test """
self.vapi.nat44_add_del_interface_addr(
is_add=1,
sw_if_index=self.pg1.sw_if_index)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
self.vapi.nat44_forwarding_enable_disable(enable=1)
data = b"A" * 16 + b"B" * 16 + b"C" * 3
pkts = self.create_stream_frag(self.pg1,
self.pg0.remote_ip4,
4789,
4789,
data,
proto=IP_PROTOS.udp)
self.pg1.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
frags = self.pg0.get_capture(len(pkts))
p = self.reass_frags_and_verify(frags,
self.pg1.remote_ip4,
self.pg0.remote_ip4)
self.assertEqual(p[UDP].sport, 4789)
self.assertEqual(p[UDP].dport, 4789)
self.assertEqual(data, p[Raw].load)
def test_reass_hairpinning(self):
""" NAT44EI fragments hairpinning """
server_addr = self.pg0.remote_hosts[1].ip4
host_in_port = random.randint(1025, 65535)
server_in_port = random.randint(1025, 65535)
server_out_port = random.randint(1025, 65535)
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
# add static mapping for server
self.nat44_add_static_mapping(server_addr, self.nat_addr,
server_in_port,
server_out_port,
proto=IP_PROTOS.tcp)
self.nat44_add_static_mapping(server_addr, self.nat_addr,
server_in_port,
server_out_port,
proto=IP_PROTOS.udp)
self.nat44_add_static_mapping(server_addr, self.nat_addr)
self.reass_hairpinning(server_addr, server_in_port, server_out_port,
host_in_port, proto=IP_PROTOS.tcp)
self.reass_hairpinning(server_addr, server_in_port, server_out_port,
host_in_port, proto=IP_PROTOS.udp)
self.reass_hairpinning(server_addr, server_in_port, server_out_port,
host_in_port, proto=IP_PROTOS.icmp)
def test_frag_out_of_order(self):
""" NAT44EI translate fragments arriving out of order """
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
self.frag_out_of_order(proto=IP_PROTOS.tcp)
self.frag_out_of_order(proto=IP_PROTOS.udp)
self.frag_out_of_order(proto=IP_PROTOS.icmp)
def test_port_restricted(self):
""" NAT44EI Port restricted NAT44EI (MAP-E CE) """
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
self.vapi.nat_set_addr_and_port_alloc_alg(alg=1,
psid_offset=6,
psid_length=6,
psid=10)
p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
TCP(sport=4567, dport=22))
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
p = capture[0]
try:
ip = p[IP]
tcp = p[TCP]
self.assertEqual(ip.dst, self.pg1.remote_ip4)
self.assertEqual(ip.src, self.nat_addr)
self.assertEqual(tcp.dport, 22)
self.assertNotEqual(tcp.sport, 4567)
self.assertEqual((tcp.sport >> 6) & 63, 10)
self.assert_packet_checksums_valid(p)
except:
self.logger.error(ppp("Unexpected or invalid packet:", p))
raise
def test_port_range(self):
""" NAT44EI External address port range """
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
self.vapi.nat_set_addr_and_port_alloc_alg(alg=2,
start_port=1025,
end_port=1027)
pkts = []
for port in range(0, 5):
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
TCP(sport=1125 + port))
pkts.append(p)
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(3)
for p in capture:
tcp = p[TCP]
self.assertGreaterEqual(tcp.sport, 1025)
self.assertLessEqual(tcp.sport, 1027)
    def test_multiple_outside_vrf(self):
        """ NAT44EI Multiple outside VRF

        pg1 and pg2 are moved into separate FIB tables and both act as
        outside interfaces; the finally block restores them to the
        default table regardless of test outcome.
        """
        vrf_id1 = 1
        vrf_id2 = 2
        # interfaces must be unconfigured before changing FIB tables
        self.pg1.unconfig_ip4()
        self.pg2.unconfig_ip4()
        self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id1})
        self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id2})
        self.pg1.set_table_ip4(vrf_id1)
        self.pg2.set_table_ip4(vrf_id2)
        self.pg1.config_ip4()
        self.pg2.config_ip4()
        self.pg1.resolve_arp()
        self.pg2.resolve_arp()
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg2.sw_if_index,
            is_add=1)
        try:
            # first VRF
            pkts = self.create_stream_in(self.pg0, self.pg1)
            self.pg0.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            capture = self.pg1.get_capture(len(pkts))
            self.verify_capture_out(capture, self.nat_addr)
            pkts = self.create_stream_out(self.pg1, self.nat_addr)
            self.pg1.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            capture = self.pg0.get_capture(len(pkts))
            self.verify_capture_in(capture, self.pg0)
            # use fresh inside ports so the second VRF creates new
            # sessions instead of reusing the first VRF's
            self.tcp_port_in = 60303
            self.udp_port_in = 60304
            self.icmp_id_in = 60305
            # second VRF
            pkts = self.create_stream_in(self.pg0, self.pg2)
            self.pg0.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            capture = self.pg2.get_capture(len(pkts))
            self.verify_capture_out(capture, self.nat_addr)
            pkts = self.create_stream_out(self.pg2, self.nat_addr)
            self.pg2.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            capture = self.pg0.get_capture(len(pkts))
            self.verify_capture_in(capture, self.pg0)
        finally:
            # restore pg1/pg2 to the default FIB table
            self.nat44_add_address(self.nat_addr, is_add=0)
            self.pg1.unconfig_ip4()
            self.pg2.unconfig_ip4()
            self.pg1.set_table_ip4(0)
            self.pg2.set_table_ip4(0)
            self.pg1.config_ip4()
            self.pg2.config_ip4()
            self.pg1.resolve_arp()
            self.pg2.resolve_arp()
def test_mss_clamping(self):
""" NAT44EI TCP MSS clamping """
self.nat44_add_address(self.nat_addr)
flags = self.config_flags.NAT_IS_INSIDE
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg0.sw_if_index,
flags=flags, is_add=1)
self.vapi.nat44_interface_add_del_feature(
sw_if_index=self.pg1.sw_if_index,
is_add=1)
p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
flags="S", options=[('MSS', 1400)]))
self.vapi.nat_set_mss_clamping(enable=1, mss_value=1000)
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
# Negotiated MSS value greater than configured - changed
self.verify_mss_value(capture[0], 1000)
self.vapi.nat_set_mss_clamping(enable=0, mss_value=1500)
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
# MSS clamping disabled - negotiated MSS unchanged
self.verify_mss_value(capture[0], 1400)
self.vapi.nat_set_mss_clamping(enable=1, mss_value=1500)
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
# Negotiated MSS value smaller than configured - unchanged
self.verify_mss_value(capture[0], 1400)
    def test_ha_send(self):
        """ NAT44EI Send HA session synchronization events (active)

        This node is the active HA peer: it must emit add/del/refresh
        events on pg3, retry unacknowledged events, and count ACKs.
        """
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        self.nat44_add_address(self.nat_addr)
        # HA listener on pg3's local side, failover peer on its remote side
        self.vapi.nat_ha_set_listener(ip_address=self.pg3.local_ip4,
                                      port=12345,
                                      path_mtu=512)
        self.vapi.nat_ha_set_failover(ip_address=self.pg3.remote_ip4,
                                      port=12346, session_refresh_interval=10)
        # let scapy decode UDP sport 12345 payloads as HANATStateSync
        bind_layers(UDP, HANATStateSync, sport=12345)
        # create sessions
        pkts = self.create_stream_in(self.pg0, self.pg1)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        self.verify_capture_out(capture)
        # active send HA events
        self.vapi.nat_ha_flush()
        stats = self.statistics.get_counter('/nat44/ha/add-event-send')
        self.assertEqual(stats[0][0], 3)
        capture = self.pg3.get_capture(1)
        p = capture[0]
        self.assert_packet_checksums_valid(p)
        try:
            ip = p[IP]
            udp = p[UDP]
            hanat = p[HANATStateSync]
        except IndexError:
            self.logger.error(ppp("Invalid packet:", p))
            raise
        else:
            self.assertEqual(ip.src, self.pg3.local_ip4)
            self.assertEqual(ip.dst, self.pg3.remote_ip4)
            self.assertEqual(udp.sport, 12345)
            self.assertEqual(udp.dport, 12346)
            self.assertEqual(hanat.version, 1)
            self.assertEqual(hanat.thread_index, 0)
            self.assertEqual(hanat.count, 3)
            # remember the sequence number for the ACK below
            seq = hanat.sequence_number
            for event in hanat.events:
                self.assertEqual(event.event_type, 1)
                self.assertEqual(event.in_addr, self.pg0.remote_ip4)
                self.assertEqual(event.out_addr, self.nat_addr)
                self.assertEqual(event.fib_index, 0)
        # ACK received events
        ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
               IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
               UDP(sport=12346, dport=12345) /
               HANATStateSync(sequence_number=seq, flags='ACK'))
        self.pg3.add_stream(ack)
        self.pg_start()
        stats = self.statistics.get_counter('/nat44/ha/ack-recv')
        self.assertEqual(stats[0][0], 1)
        # delete one session
        self.pg_enable_capture(self.pg_interfaces)
        self.vapi.nat44_del_session(address=self.pg0.remote_ip4,
                                    port=self.tcp_port_in,
                                    protocol=IP_PROTOS.tcp,
                                    flags=self.config_flags.NAT_IS_INSIDE)
        self.vapi.nat_ha_flush()
        stats = self.statistics.get_counter('/nat44/ha/del-event-send')
        self.assertEqual(stats[0][0], 1)
        capture = self.pg3.get_capture(1)
        p = capture[0]
        try:
            hanat = p[HANATStateSync]
        except IndexError:
            self.logger.error(ppp("Invalid packet:", p))
            raise
        else:
            self.assertGreater(hanat.sequence_number, seq)
        # do not send ACK, active retry send HA event again
        self.pg_enable_capture(self.pg_interfaces)
        # wait past the refresh interval so retries are emitted
        sleep(12)
        stats = self.statistics.get_counter('/nat44/ha/retry-count')
        self.assertEqual(stats[0][0], 3)
        stats = self.statistics.get_counter('/nat44/ha/missed-count')
        self.assertEqual(stats[0][0], 1)
        capture = self.pg3.get_capture(3)
        for packet in capture:
            # every retry must be identical to the original event packet
            self.assertEqual(packet, p)
        # session counters refresh
        pkts = self.create_stream_out(self.pg1)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(2)
        self.vapi.nat_ha_flush()
        stats = self.statistics.get_counter('/nat44/ha/refresh-event-send')
        self.assertEqual(stats[0][0], 2)
        capture = self.pg3.get_capture(1)
        p = capture[0]
        self.assert_packet_checksums_valid(p)
        try:
            ip = p[IP]
            udp = p[UDP]
            hanat = p[HANATStateSync]
        except IndexError:
            self.logger.error(ppp("Invalid packet:", p))
            raise
        else:
            self.assertEqual(ip.src, self.pg3.local_ip4)
            self.assertEqual(ip.dst, self.pg3.remote_ip4)
            self.assertEqual(udp.sport, 12345)
            self.assertEqual(udp.dport, 12346)
            self.assertEqual(hanat.version, 1)
            self.assertEqual(hanat.count, 2)
            seq = hanat.sequence_number
            for event in hanat.events:
                self.assertEqual(event.event_type, 3)
                self.assertEqual(event.out_addr, self.nat_addr)
                self.assertEqual(event.fib_index, 0)
                self.assertEqual(event.total_pkts, 2)
                self.assertGreater(event.total_bytes, 0)
        ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
               IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
               UDP(sport=12346, dport=12345) /
               HANATStateSync(sequence_number=seq, flags='ACK'))
        self.pg3.add_stream(ack)
        self.pg_start()
        stats = self.statistics.get_counter('/nat44/ha/ack-recv')
        self.assertEqual(stats[0][0], 2)
    def test_ha_recv(self):
        """ NAT44EI Receive HA session synchronization events (passive)

        This node is the passive HA peer: injected add/del/refresh
        events from pg3 must create, remove and update sessions, and
        each event packet must be ACKed back to the sender.
        """
        self.nat44_add_address(self.nat_addr)
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg1.sw_if_index,
            is_add=1)
        self.vapi.nat_ha_set_listener(ip_address=self.pg3.local_ip4,
                                      port=12345,
                                      path_mtu=512)
        # let scapy decode UDP sport 12345 payloads as HANATStateSync
        bind_layers(UDP, HANATStateSync, sport=12345)
        # arbitrary outside ports carried in the synthetic HA events
        self.tcp_port_out = random.randint(1025, 65535)
        self.udp_port_out = random.randint(1025, 65535)
        # send HA session add events to failover/passive
        p = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
             IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
             UDP(sport=12346, dport=12345) /
             HANATStateSync(sequence_number=1, events=[
                 Event(event_type='add', protocol='tcp',
                       in_addr=self.pg0.remote_ip4, out_addr=self.nat_addr,
                       in_port=self.tcp_port_in, out_port=self.tcp_port_out,
                       eh_addr=self.pg1.remote_ip4,
                       ehn_addr=self.pg1.remote_ip4,
                       eh_port=self.tcp_external_port,
                       ehn_port=self.tcp_external_port, fib_index=0),
                 Event(event_type='add', protocol='udp',
                       in_addr=self.pg0.remote_ip4, out_addr=self.nat_addr,
                       in_port=self.udp_port_in, out_port=self.udp_port_out,
                       eh_addr=self.pg1.remote_ip4,
                       ehn_addr=self.pg1.remote_ip4,
                       eh_port=self.udp_external_port,
                       ehn_port=self.udp_external_port, fib_index=0)]))
        self.pg3.add_stream(p)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # receive ACK
        capture = self.pg3.get_capture(1)
        p = capture[0]
        try:
            hanat = p[HANATStateSync]
        except IndexError:
            self.logger.error(ppp("Invalid packet:", p))
            raise
        else:
            self.assertEqual(hanat.sequence_number, 1)
            self.assertEqual(hanat.flags, 'ACK')
            self.assertEqual(hanat.version, 1)
            self.assertEqual(hanat.thread_index, 0)
        stats = self.statistics.get_counter('/nat44/ha/ack-send')
        self.assertEqual(stats[0][0], 1)
        stats = self.statistics.get_counter('/nat44/ha/add-event-recv')
        self.assertEqual(stats[0][0], 2)
        users = self.statistics.get_counter('/nat44/total-users')
        self.assertEqual(users[0][0], 1)
        sessions = self.statistics.get_counter('/nat44/total-sessions')
        self.assertEqual(sessions[0][0], 2)
        users = self.vapi.nat44_user_dump()
        self.assertEqual(len(users), 1)
        self.assertEqual(str(users[0].ip_address),
                         self.pg0.remote_ip4)
        # there should be 2 sessions created by HA
        sessions = self.vapi.nat44_user_session_dump(users[0].ip_address,
                                                     users[0].vrf_id)
        self.assertEqual(len(sessions), 2)
        for session in sessions:
            self.assertEqual(str(session.inside_ip_address),
                             self.pg0.remote_ip4)
            self.assertEqual(str(session.outside_ip_address),
                             self.nat_addr)
            self.assertIn(session.inside_port,
                          [self.tcp_port_in, self.udp_port_in])
            self.assertIn(session.outside_port,
                          [self.tcp_port_out, self.udp_port_out])
            self.assertIn(session.protocol, [IP_PROTOS.tcp, IP_PROTOS.udp])
        # send HA session delete event to failover/passive
        p = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
             IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
             UDP(sport=12346, dport=12345) /
             HANATStateSync(sequence_number=2, events=[
                 Event(event_type='del', protocol='udp',
                       in_addr=self.pg0.remote_ip4, out_addr=self.nat_addr,
                       in_port=self.udp_port_in, out_port=self.udp_port_out,
                       eh_addr=self.pg1.remote_ip4,
                       ehn_addr=self.pg1.remote_ip4,
                       eh_port=self.udp_external_port,
                       ehn_port=self.udp_external_port, fib_index=0)]))
        self.pg3.add_stream(p)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # receive ACK
        capture = self.pg3.get_capture(1)
        p = capture[0]
        try:
            hanat = p[HANATStateSync]
        except IndexError:
            self.logger.error(ppp("Invalid packet:", p))
            raise
        else:
            self.assertEqual(hanat.sequence_number, 2)
            self.assertEqual(hanat.flags, 'ACK')
            self.assertEqual(hanat.version, 1)
        users = self.vapi.nat44_user_dump()
        self.assertEqual(len(users), 1)
        self.assertEqual(str(users[0].ip_address),
                         self.pg0.remote_ip4)
        # now we should have only 1 session, 1 deleted by HA
        sessions = self.vapi.nat44_user_session_dump(users[0].ip_address,
                                                     users[0].vrf_id)
        self.assertEqual(len(sessions), 1)
        stats = self.statistics.get_counter('/nat44/ha/del-event-recv')
        self.assertEqual(stats[0][0], 1)
        stats = self.statistics.get_err_counter('/err/nat-ha/pkts-processed')
        self.assertEqual(stats, 2)
        # send HA session refresh event to failover/passive
        p = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
             IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
             UDP(sport=12346, dport=12345) /
             HANATStateSync(sequence_number=3, events=[
                 Event(event_type='refresh', protocol='tcp',
                       in_addr=self.pg0.remote_ip4, out_addr=self.nat_addr,
                       in_port=self.tcp_port_in, out_port=self.tcp_port_out,
                       eh_addr=self.pg1.remote_ip4,
                       ehn_addr=self.pg1.remote_ip4,
                       eh_port=self.tcp_external_port,
                       ehn_port=self.tcp_external_port, fib_index=0,
                       total_bytes=1024, total_pkts=2)]))
        self.pg3.add_stream(p)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # receive ACK
        capture = self.pg3.get_capture(1)
        p = capture[0]
        try:
            hanat = p[HANATStateSync]
        except IndexError:
            self.logger.error(ppp("Invalid packet:", p))
            raise
        else:
            self.assertEqual(hanat.sequence_number, 3)
            self.assertEqual(hanat.flags, 'ACK')
            self.assertEqual(hanat.version, 1)
        users = self.vapi.nat44_user_dump()
        self.assertEqual(len(users), 1)
        self.assertEqual(str(users[0].ip_address),
                         self.pg0.remote_ip4)
        sessions = self.vapi.nat44_user_session_dump(users[0].ip_address,
                                                     users[0].vrf_id)
        self.assertEqual(len(sessions), 1)
        session = sessions[0]
        # counters must reflect the values carried in the refresh event
        self.assertEqual(session.total_bytes, 1024)
        self.assertEqual(session.total_pkts, 2)
        stats = self.statistics.get_counter('/nat44/ha/refresh-event-recv')
        self.assertEqual(stats[0][0], 1)
        stats = self.statistics.get_err_counter('/err/nat-ha/pkts-processed')
        self.assertEqual(stats, 3)
        # send packet to test session created by HA
        p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
             IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
             TCP(sport=self.tcp_external_port, dport=self.tcp_port_out))
        self.pg1.add_stream(p)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg0.get_capture(1)
        p = capture[0]
        try:
            ip = p[IP]
            tcp = p[TCP]
        except IndexError:
            self.logger.error(ppp("Invalid packet:", p))
            raise
        else:
            self.assertEqual(ip.src, self.pg1.remote_ip4)
            self.assertEqual(ip.dst, self.pg0.remote_ip4)
            self.assertEqual(tcp.sport, self.tcp_external_port)
            self.assertEqual(tcp.dport, self.tcp_port_in)
def show_commands_at_teardown(self):
self.logger.info(self.vapi.cli("show nat44 addresses"))
self.logger.info(self.vapi.cli("show nat44 interfaces"))
self.logger.info(self.vapi.cli("show nat44 static mappings"))
self.logger.info(self.vapi.cli("show nat44 interface address"))
self.logger.info(self.vapi.cli("show nat44 sessions detail"))
self.logger.info(self.vapi.cli("show nat44 hash tables detail"))
self.logger.info(self.vapi.cli("show nat timeouts"))
self.logger.info(
self.vapi.cli("show nat addr-port-assignment-alg"))
self.logger.info(self.vapi.cli("show nat ha"))
class TestNAT44Out2InDPO(MethodHolder):
    """ NAT44EI Test Cases using out2in DPO """
    @classmethod
    def setUpClass(cls):
        """Create pg0 (IPv4 side) and pg1 (IPv6 side) and a default
        IPv6 route via pg1 for the 464XLAT scenarios."""
        super(TestNAT44Out2InDPO, cls).setUpClass()
        cls.vapi.cli("set log class nat level debug")
        cls.tcp_port_in = 6303
        cls.tcp_port_out = 6303
        cls.udp_port_in = 6304
        cls.udp_port_out = 6304
        cls.icmp_id_in = 6305
        cls.icmp_id_out = 6305
        cls.nat_addr = '10.0.0.3'
        cls.dst_ip4 = '192.168.70.1'
        cls.create_pg_interfaces(range(2))
        cls.pg0.admin_up()
        cls.pg0.config_ip4()
        cls.pg0.resolve_arp()
        cls.pg1.admin_up()
        cls.pg1.config_ip6()
        cls.pg1.resolve_ndp()
        # default IPv6 route towards pg1's peer
        r1 = VppIpRoute(cls, "::", 0,
                        [VppRoutePath(cls.pg1.remote_ip6,
                                      cls.pg1.sw_if_index)],
                        register=False)
        r1.add_vpp_config()
    def setUp(self):
        """Enable the NAT44 plugin in out2in-DPO mode for each test."""
        super(TestNAT44Out2InDPO, self).setUp()
        flags = self.nat44_config_flags.NAT44_API_IS_OUT2IN_DPO
        self.vapi.nat44_plugin_enable_disable(enable=1, flags=flags)
    def tearDown(self):
        """Disable the NAT44 plugin and clear logs (unless VPP died)."""
        super(TestNAT44Out2InDPO, self).tearDown()
        if not self.vpp_dead:
            self.vapi.nat44_plugin_enable_disable(enable=0)
            self.vapi.cli("clear logging")
    def configure_xlat(self):
        """Configure a MAP domain translating between the /96 IPv6
        prefixes stored on self (dst_ip6_pfx / src_ip6_pfx)."""
        self.dst_ip6_pfx = '1:2:3::'
        self.dst_ip6_pfx_n = socket.inet_pton(socket.AF_INET6,
                                              self.dst_ip6_pfx)
        self.dst_ip6_pfx_len = 96
        self.src_ip6_pfx = '4:5:6::'
        self.src_ip6_pfx_n = socket.inet_pton(socket.AF_INET6,
                                              self.src_ip6_pfx)
        self.src_ip6_pfx_len = 96
        self.vapi.map_add_domain(self.dst_ip6_pfx_n, self.dst_ip6_pfx_len,
                                 self.src_ip6_pfx_n, self.src_ip6_pfx_len,
                                 '\x00\x00\x00\x00', 0)
    @unittest.skip('Temporary disabled')
    def test_464xlat_ce(self):
        """ Test 464XLAT CE with NAT44EI """
        nat_config = self.vapi.nat_show_config()
        self.assertEqual(1, nat_config.out2in_dpo)
        self.configure_xlat()
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat44_interface_add_del_feature(
            sw_if_index=self.pg0.sw_if_index,
            flags=flags, is_add=1)
        self.vapi.nat44_add_del_address_range(first_ip_address=self.nat_addr_n,
                                              last_ip_address=self.nat_addr_n,
                                              vrf_id=0xFFFFFFFF, is_add=1)
        # IPv6 addresses embedding the IPv4 endpoints per MAP config
        out_src_ip6 = self.compose_ip6(self.dst_ip4, self.dst_ip6_pfx,
                                       self.dst_ip6_pfx_len)
        out_dst_ip6 = self.compose_ip6(self.nat_addr, self.src_ip6_pfx,
                                       self.src_ip6_pfx_len)
        try:
            pkts = self.create_stream_in(self.pg0, self.pg1, self.dst_ip4)
            self.pg0.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            capture = self.pg1.get_capture(len(pkts))
            self.verify_capture_out_ip6(capture, nat_ip=out_dst_ip6,
                                        dst_ip=out_src_ip6)
            pkts = self.create_stream_out_ip6(self.pg1, out_src_ip6,
                                              out_dst_ip6)
            self.pg1.add_stream(pkts)
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            capture = self.pg0.get_capture(len(pkts))
            self.verify_capture_in(capture, self.pg0)
        finally:
            # roll back NAT feature and address range configuration
            self.vapi.nat44_interface_add_del_feature(
                sw_if_index=self.pg0.sw_if_index,
                flags=flags)
            self.vapi.nat44_add_del_address_range(
                first_ip_address=self.nat_addr_n,
                last_ip_address=self.nat_addr_n,
                vrf_id=0xFFFFFFFF)
    @unittest.skip('Temporary disabled')
    def test_464xlat_ce_no_nat(self):
        """ Test 464XLAT CE without NAT44EI """
        self.configure_xlat()
        out_src_ip6 = self.compose_ip6(self.dst_ip4, self.dst_ip6_pfx,
                                       self.dst_ip6_pfx_len)
        out_dst_ip6 = self.compose_ip6(self.pg0.remote_ip4, self.src_ip6_pfx,
                                       self.src_ip6_pfx_len)
        pkts = self.create_stream_in(self.pg0, self.pg1, self.dst_ip4)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(len(pkts))
        # without NAT the source port must pass through unchanged
        self.verify_capture_out_ip6(capture, dst_ip=out_src_ip6,
                                    nat_ip=out_dst_ip6, same_port=True)
        pkts = self.create_stream_out_ip6(self.pg1, out_src_ip6, out_dst_ip6)
        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg0.get_capture(len(pkts))
        self.verify_capture_in(capture, self.pg0)
# Run the whole module under VPP's test runner when invoked directly.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
| 40.887933
| 79
| 0.590087
| 20,314
| 156,887
| 4.293148
| 0.033179
| 0.024355
| 0.027038
| 0.019229
| 0.837658
| 0.798064
| 0.774925
| 0.750742
| 0.727053
| 0.696369
| 0
| 0.037307
| 0.311116
| 156,887
| 3,836
| 80
| 40.898592
| 0.769627
| 0.062988
| 0
| 0.728939
| 0
| 0
| 0.024245
| 0.00585
| 0
| 0
| 0.000824
| 0
| 0.12926
| 1
| 0.028939
| false
| 0
| 0.007396
| 0.001286
| 0.044373
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16cf46558728da781ea559f1313048ca9c49048d
| 33,335
|
py
|
Python
|
sdk/python/pulumi_azure/iot/endpoint_storage_container.py
|
pulumi/pulumi-azure
|
c62b6c1828de1facfd0d92425b72e22e229b0afc
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/iot/endpoint_storage_container.py
|
pulumi/pulumi-azure
|
c62b6c1828de1facfd0d92425b72e22e229b0afc
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/iot/endpoint_storage_container.py
|
pulumi/pulumi-azure
|
c62b6c1828de1facfd0d92425b72e22e229b0afc
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['EndpointStorageContainerArgs', 'EndpointStorageContainer']
@pulumi.input_type
class EndpointStorageContainerArgs:
    """Input properties accepted when creating an `EndpointStorageContainer` resource."""
    def __init__(__self__, *,
                 connection_string: pulumi.Input[str],
                 container_name: pulumi.Input[str],
                 iothub_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 batch_frequency_in_seconds: Optional[pulumi.Input[int]] = None,
                 encoding: Optional[pulumi.Input[str]] = None,
                 file_name_format: Optional[pulumi.Input[str]] = None,
                 max_chunk_size_in_bytes: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a EndpointStorageContainer resource.
        :param pulumi.Input[str] connection_string: The connection string for the endpoint.
        :param pulumi.Input[str] container_name: The name of storage container in the storage account.
        :param pulumi.Input[str] iothub_name: The name of the IoTHub to which this Storage Container Endpoint belongs. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group under which the IotHub Storage Container Endpoint resource has to be created. Changing this forces a new resource to be created.
        :param pulumi.Input[int] batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
        :param pulumi.Input[str] encoding: Encoding that is used to serialize messages to blobs. Supported values are `Avro`, `AvroDeflate` and `JSON`. Default value is `Avro`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] file_name_format: File name format for the blob. Default format is ``{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}``. All parameters are mandatory but can be reordered.
        :param pulumi.Input[int] max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
        :param pulumi.Input[str] name: The name of the endpoint. The name must be unique across endpoint types. The following names are reserved: `events`, `operationsMonitoringEvents`, `fileNotifications` and `$default`.
        """
        # Required properties are always stored; optional ones are stored only
        # when supplied, so an unset property stays absent rather than None.
        pulumi.set(__self__, "connection_string", connection_string)
        pulumi.set(__self__, "container_name", container_name)
        pulumi.set(__self__, "iothub_name", iothub_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if batch_frequency_in_seconds is not None:
            pulumi.set(__self__, "batch_frequency_in_seconds", batch_frequency_in_seconds)
        if encoding is not None:
            pulumi.set(__self__, "encoding", encoding)
        if file_name_format is not None:
            pulumi.set(__self__, "file_name_format", file_name_format)
        if max_chunk_size_in_bytes is not None:
            pulumi.set(__self__, "max_chunk_size_in_bytes", max_chunk_size_in_bytes)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> pulumi.Input[str]:
        """
        The connection string for the endpoint.
        """
        return pulumi.get(self, "connection_string")
    @connection_string.setter
    def connection_string(self, value: pulumi.Input[str]):
        pulumi.set(self, "connection_string", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> pulumi.Input[str]:
        """
        The name of storage container in the storage account.
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="iothubName")
    def iothub_name(self) -> pulumi.Input[str]:
        """
        The name of the IoTHub to which this Storage Container Endpoint belongs. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "iothub_name")
    @iothub_name.setter
    def iothub_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "iothub_name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group under which the IotHub Storage Container Endpoint resource has to be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="batchFrequencyInSeconds")
    def batch_frequency_in_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
        """
        return pulumi.get(self, "batch_frequency_in_seconds")
    @batch_frequency_in_seconds.setter
    def batch_frequency_in_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "batch_frequency_in_seconds", value)
    @property
    @pulumi.getter
    def encoding(self) -> Optional[pulumi.Input[str]]:
        """
        Encoding that is used to serialize messages to blobs. Supported values are `Avro`, `AvroDeflate` and `JSON`. Default value is `Avro`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "encoding")
    @encoding.setter
    def encoding(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "encoding", value)
    @property
    @pulumi.getter(name="fileNameFormat")
    def file_name_format(self) -> Optional[pulumi.Input[str]]:
        """
        File name format for the blob. Default format is ``{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}``. All parameters are mandatory but can be reordered.
        """
        return pulumi.get(self, "file_name_format")
    @file_name_format.setter
    def file_name_format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "file_name_format", value)
    @property
    @pulumi.getter(name="maxChunkSizeInBytes")
    def max_chunk_size_in_bytes(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
        """
        return pulumi.get(self, "max_chunk_size_in_bytes")
    @max_chunk_size_in_bytes.setter
    def max_chunk_size_in_bytes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_chunk_size_in_bytes", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the endpoint. The name must be unique across endpoint types. The following names are reserved: `events`, `operationsMonitoringEvents`, `fileNotifications` and `$default`.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _EndpointStorageContainerState:
    """State properties used to look up and filter existing `EndpointStorageContainer` resources."""
    def __init__(__self__, *,
                 batch_frequency_in_seconds: Optional[pulumi.Input[int]] = None,
                 connection_string: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 encoding: Optional[pulumi.Input[str]] = None,
                 file_name_format: Optional[pulumi.Input[str]] = None,
                 iothub_name: Optional[pulumi.Input[str]] = None,
                 max_chunk_size_in_bytes: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering EndpointStorageContainer resources.
        :param pulumi.Input[int] batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
        :param pulumi.Input[str] connection_string: The connection string for the endpoint.
        :param pulumi.Input[str] container_name: The name of storage container in the storage account.
        :param pulumi.Input[str] encoding: Encoding that is used to serialize messages to blobs. Supported values are `Avro`, `AvroDeflate` and `JSON`. Default value is `Avro`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] file_name_format: File name format for the blob. Default format is ``{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}``. All parameters are mandatory but can be reordered.
        :param pulumi.Input[str] iothub_name: The name of the IoTHub to which this Storage Container Endpoint belongs. Changing this forces a new resource to be created.
        :param pulumi.Input[int] max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
        :param pulumi.Input[str] name: The name of the endpoint. The name must be unique across endpoint types. The following names are reserved: `events`, `operationsMonitoringEvents`, `fileNotifications` and `$default`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group under which the IotHub Storage Container Endpoint resource has to be created. Changing this forces a new resource to be created.
        """
        # Every state property is optional; store only those actually supplied
        # so unset properties remain absent rather than explicitly None.
        if batch_frequency_in_seconds is not None:
            pulumi.set(__self__, "batch_frequency_in_seconds", batch_frequency_in_seconds)
        if connection_string is not None:
            pulumi.set(__self__, "connection_string", connection_string)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if encoding is not None:
            pulumi.set(__self__, "encoding", encoding)
        if file_name_format is not None:
            pulumi.set(__self__, "file_name_format", file_name_format)
        if iothub_name is not None:
            pulumi.set(__self__, "iothub_name", iothub_name)
        if max_chunk_size_in_bytes is not None:
            pulumi.set(__self__, "max_chunk_size_in_bytes", max_chunk_size_in_bytes)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
    @property
    @pulumi.getter(name="batchFrequencyInSeconds")
    def batch_frequency_in_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
        """
        return pulumi.get(self, "batch_frequency_in_seconds")
    @batch_frequency_in_seconds.setter
    def batch_frequency_in_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "batch_frequency_in_seconds", value)
    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> Optional[pulumi.Input[str]]:
        """
        The connection string for the endpoint.
        """
        return pulumi.get(self, "connection_string")
    @connection_string.setter
    def connection_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_string", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of storage container in the storage account.
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter
    def encoding(self) -> Optional[pulumi.Input[str]]:
        """
        Encoding that is used to serialize messages to blobs. Supported values are `Avro`, `AvroDeflate` and `JSON`. Default value is `Avro`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "encoding")
    @encoding.setter
    def encoding(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "encoding", value)
    @property
    @pulumi.getter(name="fileNameFormat")
    def file_name_format(self) -> Optional[pulumi.Input[str]]:
        """
        File name format for the blob. Default format is ``{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}``. All parameters are mandatory but can be reordered.
        """
        return pulumi.get(self, "file_name_format")
    @file_name_format.setter
    def file_name_format(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "file_name_format", value)
    @property
    @pulumi.getter(name="iothubName")
    def iothub_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the IoTHub to which this Storage Container Endpoint belongs. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "iothub_name")
    @iothub_name.setter
    def iothub_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "iothub_name", value)
    @property
    @pulumi.getter(name="maxChunkSizeInBytes")
    def max_chunk_size_in_bytes(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
        """
        return pulumi.get(self, "max_chunk_size_in_bytes")
    @max_chunk_size_in_bytes.setter
    def max_chunk_size_in_bytes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_chunk_size_in_bytes", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the endpoint. The name must be unique across endpoint types. The following names are reserved: `events`, `operationsMonitoringEvents`, `fileNotifications` and `$default`.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group under which the IotHub Storage Container Endpoint resource has to be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)
class EndpointStorageContainer(pulumi.CustomResource):
    """An IotHub Storage Container Endpoint resource (see the `__init__` overloads for full usage)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 batch_frequency_in_seconds: Optional[pulumi.Input[int]] = None,
                 connection_string: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 encoding: Optional[pulumi.Input[str]] = None,
                 file_name_format: Optional[pulumi.Input[str]] = None,
                 iothub_name: Optional[pulumi.Input[str]] = None,
                 max_chunk_size_in_bytes: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages an IotHub Storage Container Endpoint
        > **NOTE:** Endpoints can be defined either directly on the `iot.IoTHub` resource, or using the `azurerm_iothub_endpoint_*` resources - but the two ways of defining the endpoints cannot be used together. If both are used against the same IoTHub, spurious changes will occur. Also, defining a `azurerm_iothub_endpoint_*` resource and another endpoint of a different type directly on the `iot.IoTHub` resource is not supported.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="LRS")
        example_container = azure.storage.Container("exampleContainer",
            storage_account_name=example_account.name,
            container_access_type="private")
        example_io_t_hub = azure.iot.IoTHub("exampleIoTHub",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            sku=azure.iot.IoTHubSkuArgs(
                name="S1",
                capacity=1,
            ))
        example_endpoint_storage_container = azure.iot.EndpointStorageContainer("exampleEndpointStorageContainer",
            resource_group_name=example_resource_group.name,
            iothub_name=example_io_t_hub.name,
            container_name="acctestcont",
            connection_string=example_account.primary_blob_connection_string,
            file_name_format="{iothub}/{partition}_{YYYY}_{MM}_{DD}_{HH}_{mm}",
            batch_frequency_in_seconds=60,
            max_chunk_size_in_bytes=10485760,
            encoding="JSON")
        ```
        ## Import
        IoTHub Storage Container Endpoint can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:iot/endpointStorageContainer:EndpointStorageContainer storage_container1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Devices/IotHubs/hub1/Endpoints/storage_container_endpoint1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
        :param pulumi.Input[str] connection_string: The connection string for the endpoint.
        :param pulumi.Input[str] container_name: The name of storage container in the storage account.
        :param pulumi.Input[str] encoding: Encoding that is used to serialize messages to blobs. Supported values are `Avro`, `AvroDeflate` and `JSON`. Default value is `Avro`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] file_name_format: File name format for the blob. Default format is ``{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}``. All parameters are mandatory but can be reordered.
        :param pulumi.Input[str] iothub_name: The name of the IoTHub to which this Storage Container Endpoint belongs. Changing this forces a new resource to be created.
        :param pulumi.Input[int] max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
        :param pulumi.Input[str] name: The name of the endpoint. The name must be unique across endpoint types. The following names are reserved: `events`, `operationsMonitoringEvents`, `fileNotifications` and `$default`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group under which the IotHub Storage Container Endpoint resource has to be created. Changing this forces a new resource to be created.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: EndpointStorageContainerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages an IotHub Storage Container Endpoint
        > **NOTE:** Endpoints can be defined either directly on the `iot.IoTHub` resource, or using the `azurerm_iothub_endpoint_*` resources - but the two ways of defining the endpoints cannot be used together. If both are used against the same IoTHub, spurious changes will occur. Also, defining a `azurerm_iothub_endpoint_*` resource and another endpoint of a different type directly on the `iot.IoTHub` resource is not supported.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="LRS")
        example_container = azure.storage.Container("exampleContainer",
            storage_account_name=example_account.name,
            container_access_type="private")
        example_io_t_hub = azure.iot.IoTHub("exampleIoTHub",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            sku=azure.iot.IoTHubSkuArgs(
                name="S1",
                capacity=1,
            ))
        example_endpoint_storage_container = azure.iot.EndpointStorageContainer("exampleEndpointStorageContainer",
            resource_group_name=example_resource_group.name,
            iothub_name=example_io_t_hub.name,
            container_name="acctestcont",
            connection_string=example_account.primary_blob_connection_string,
            file_name_format="{iothub}/{partition}_{YYYY}_{MM}_{DD}_{HH}_{mm}",
            batch_frequency_in_seconds=60,
            max_chunk_size_in_bytes=10485760,
            encoding="JSON")
        ```
        ## Import
        IoTHub Storage Container Endpoint can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:iot/endpointStorageContainer:EndpointStorageContainer storage_container1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Devices/IotHubs/hub1/Endpoints/storage_container_endpoint1
        ```
        :param str resource_name: The name of the resource.
        :param EndpointStorageContainerArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: an args-object call is unpacked
        # into keyword arguments for _internal_init; otherwise pass through.
        resource_args, opts = _utilities.get_resource_args_opts(EndpointStorageContainerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 batch_frequency_in_seconds: Optional[pulumi.Input[int]] = None,
                 connection_string: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 encoding: Optional[pulumi.Input[str]] = None,
                 file_name_format: Optional[pulumi.Input[str]] = None,
                 iothub_name: Optional[pulumi.Input[str]] = None,
                 max_chunk_size_in_bytes: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Normalize options and stamp them with the SDK version.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ is reserved for the get() path
            # (which supplies opts.id), so reject it here.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = EndpointStorageContainerArgs.__new__(EndpointStorageContainerArgs)
            __props__.__dict__["batch_frequency_in_seconds"] = batch_frequency_in_seconds
            # Required properties are validated only when not adopting an
            # existing resource via opts.urn.
            if connection_string is None and not opts.urn:
                raise TypeError("Missing required property 'connection_string'")
            __props__.__dict__["connection_string"] = connection_string
            if container_name is None and not opts.urn:
                raise TypeError("Missing required property 'container_name'")
            __props__.__dict__["container_name"] = container_name
            __props__.__dict__["encoding"] = encoding
            __props__.__dict__["file_name_format"] = file_name_format
            if iothub_name is None and not opts.urn:
                raise TypeError("Missing required property 'iothub_name'")
            __props__.__dict__["iothub_name"] = iothub_name
            __props__.__dict__["max_chunk_size_in_bytes"] = max_chunk_size_in_bytes
            __props__.__dict__["name"] = name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
        super(EndpointStorageContainer, __self__).__init__(
            'azure:iot/endpointStorageContainer:EndpointStorageContainer',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            batch_frequency_in_seconds: Optional[pulumi.Input[int]] = None,
            connection_string: Optional[pulumi.Input[str]] = None,
            container_name: Optional[pulumi.Input[str]] = None,
            encoding: Optional[pulumi.Input[str]] = None,
            file_name_format: Optional[pulumi.Input[str]] = None,
            iothub_name: Optional[pulumi.Input[str]] = None,
            max_chunk_size_in_bytes: Optional[pulumi.Input[int]] = None,
            name: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None) -> 'EndpointStorageContainer':
        """
        Get an existing EndpointStorageContainer resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
        :param pulumi.Input[str] connection_string: The connection string for the endpoint.
        :param pulumi.Input[str] container_name: The name of storage container in the storage account.
        :param pulumi.Input[str] encoding: Encoding that is used to serialize messages to blobs. Supported values are `Avro`, `AvroDeflate` and `JSON`. Default value is `Avro`. Changing this forces a new resource to be created.
        :param pulumi.Input[str] file_name_format: File name format for the blob. Default format is ``{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}``. All parameters are mandatory but can be reordered.
        :param pulumi.Input[str] iothub_name: The name of the IoTHub to which this Storage Container Endpoint belongs. Changing this forces a new resource to be created.
        :param pulumi.Input[int] max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
        :param pulumi.Input[str] name: The name of the endpoint. The name must be unique across endpoint types. The following names are reserved: `events`, `operationsMonitoringEvents`, `fileNotifications` and `$default`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group under which the IotHub Storage Container Endpoint resource has to be created. Changing this forces a new resource to be created.
        """
        # Attach the provider id to the options; _internal_init's opts.id branch
        # then treats this as a lookup of an existing resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _EndpointStorageContainerState.__new__(_EndpointStorageContainerState)
        __props__.__dict__["batch_frequency_in_seconds"] = batch_frequency_in_seconds
        __props__.__dict__["connection_string"] = connection_string
        __props__.__dict__["container_name"] = container_name
        __props__.__dict__["encoding"] = encoding
        __props__.__dict__["file_name_format"] = file_name_format
        __props__.__dict__["iothub_name"] = iothub_name
        __props__.__dict__["max_chunk_size_in_bytes"] = max_chunk_size_in_bytes
        __props__.__dict__["name"] = name
        __props__.__dict__["resource_group_name"] = resource_group_name
        return EndpointStorageContainer(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="batchFrequencyInSeconds")
    def batch_frequency_in_seconds(self) -> pulumi.Output[Optional[int]]:
        """
        Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
        """
        return pulumi.get(self, "batch_frequency_in_seconds")
    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> pulumi.Output[str]:
        """
        The connection string for the endpoint.
        """
        return pulumi.get(self, "connection_string")
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> pulumi.Output[str]:
        """
        The name of storage container in the storage account.
        """
        return pulumi.get(self, "container_name")
    @property
    @pulumi.getter
    def encoding(self) -> pulumi.Output[Optional[str]]:
        """
        Encoding that is used to serialize messages to blobs. Supported values are `Avro`, `AvroDeflate` and `JSON`. Default value is `Avro`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "encoding")
    @property
    @pulumi.getter(name="fileNameFormat")
    def file_name_format(self) -> pulumi.Output[Optional[str]]:
        """
        File name format for the blob. Default format is ``{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}``. All parameters are mandatory but can be reordered.
        """
        return pulumi.get(self, "file_name_format")
    @property
    @pulumi.getter(name="iothubName")
    def iothub_name(self) -> pulumi.Output[str]:
        """
        The name of the IoTHub to which this Storage Container Endpoint belongs. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "iothub_name")
    @property
    @pulumi.getter(name="maxChunkSizeInBytes")
    def max_chunk_size_in_bytes(self) -> pulumi.Output[Optional[int]]:
        """
        Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
        """
        return pulumi.get(self, "max_chunk_size_in_bytes")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the endpoint. The name must be unique across endpoint types. The following names are reserved: `events`, `operationsMonitoringEvents`, `fileNotifications` and `$default`.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the resource group under which the IotHub Storage Container Endpoint resource has to be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
| 53.679549
| 433
| 0.683426
| 4,076
| 33,335
| 5.359666
| 0.067713
| 0.060926
| 0.059599
| 0.05136
| 0.913211
| 0.900027
| 0.892978
| 0.877506
| 0.870686
| 0.856038
| 0
| 0.015114
| 0.223939
| 33,335
| 620
| 434
| 53.766129
| 0.829339
| 0.449978
| 0
| 0.742236
| 1
| 0
| 0.12305
| 0.040113
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161491
| false
| 0.003106
| 0.015528
| 0
| 0.273292
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
16e1545200971059010c2e1929615cc3ad6e2e89
| 19,293
|
py
|
Python
|
sdk/python/pulumi_azure/loganalytics/linked_storage_account.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/loganalytics/linked_storage_account.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/loganalytics/linked_storage_account.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['LinkedStorageAccountArgs', 'LinkedStorageAccount']
@pulumi.input_type
class LinkedStorageAccountArgs:
    """Input properties accepted when creating a `LinkedStorageAccount` resource."""
    def __init__(__self__, *,
                 data_source_type: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 storage_account_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
                 workspace_resource_id: pulumi.Input[str]):
        """
        The set of arguments for constructing a LinkedStorageAccount resource.
        :param pulumi.Input[str] data_source_type: The data source type which should be used for this Log Analytics Linked Storage Account. Possible values are "customlogs", "azurewatson", "query", "Ingestion" and "alerts". Changing this forces a new Log Analytics Linked Storage Account to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Linked Storage Account should exist. Changing this forces a new Log Analytics Linked Storage Account to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] storage_account_ids: The storage account resource ids to be linked.
        :param pulumi.Input[str] workspace_resource_id: The resource ID of the Log Analytics Workspace. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        # All four properties are required, so each is stored unconditionally.
        pulumi.set(__self__, "data_source_type", data_source_type)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "storage_account_ids", storage_account_ids)
        pulumi.set(__self__, "workspace_resource_id", workspace_resource_id)
    @property
    @pulumi.getter(name="dataSourceType")
    def data_source_type(self) -> pulumi.Input[str]:
        """
        The data source type which should be used for this Log Analytics Linked Storage Account. Possible values are "customlogs", "azurewatson", "query", "Ingestion" and "alerts". Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "data_source_type")
    @data_source_type.setter
    def data_source_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "data_source_type", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Resource Group where the Log Analytics Linked Storage Account should exist. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="storageAccountIds")
    def storage_account_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The storage account resource ids to be linked.
        """
        return pulumi.get(self, "storage_account_ids")
    @storage_account_ids.setter
    def storage_account_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "storage_account_ids", value)
    @property
    @pulumi.getter(name="workspaceResourceId")
    def workspace_resource_id(self) -> pulumi.Input[str]:
        """
        The resource ID of the Log Analytics Workspace. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "workspace_resource_id")
    @workspace_resource_id.setter
    def workspace_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "workspace_resource_id", value)
@pulumi.input_type
class _LinkedStorageAccountState:
    def __init__(__self__, *,
                 data_source_type: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 storage_account_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 workspace_resource_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering LinkedStorageAccount resources.

        :param pulumi.Input[str] data_source_type: The data source type which should be used for this Log Analytics Linked Storage Account. Possible values are "customlogs", "azurewatson", "query", "Ingestion" and "alerts". Changing this forces a new Log Analytics Linked Storage Account to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Linked Storage Account should exist. Changing this forces a new Log Analytics Linked Storage Account to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] storage_account_ids: The storage account resource ids to be linked.
        :param pulumi.Input[str] workspace_resource_id: The resource ID of the Log Analytics Workspace. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        # Every state property is optional; only register the ones that
        # were actually supplied so unset properties stay absent.
        supplied = {
            "data_source_type": data_source_type,
            "resource_group_name": resource_group_name,
            "storage_account_ids": storage_account_ids,
            "workspace_resource_id": workspace_resource_id,
        }
        for prop_name, prop_value in supplied.items():
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="dataSourceType")
    def data_source_type(self) -> Optional[pulumi.Input[str]]:
        """
        The data source type to use for this Log Analytics Linked Storage Account. Possible values are "customlogs", "azurewatson", "query", "Ingestion" and "alerts". Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "data_source_type")

    @data_source_type.setter
    def data_source_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_source_type", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Resource Group in which the Log Analytics Linked Storage Account should exist. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="storageAccountIds")
    def storage_account_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The resource ids of the storage accounts to be linked.
        """
        return pulumi.get(self, "storage_account_ids")

    @storage_account_ids.setter
    def storage_account_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "storage_account_ids", value)

    @property
    @pulumi.getter(name="workspaceResourceId")
    def workspace_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        The resource ID of the Log Analytics Workspace. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "workspace_resource_id")

    @workspace_resource_id.setter
    def workspace_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workspace_resource_id", value)
class LinkedStorageAccount(pulumi.CustomResource):
    """Manages a Log Analytics Linked Storage Account."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 data_source_type: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 storage_account_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 workspace_resource_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a Log Analytics Linked Storage Account.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="GRS")
        example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            sku="PerGB2018")
        example_linked_storage_account = azure.loganalytics.LinkedStorageAccount("exampleLinkedStorageAccount",
            data_source_type="customlogs",
            resource_group_name=example_resource_group.name,
            workspace_resource_id=example_analytics_workspace.id,
            storage_account_ids=[example_account.id])
        ```
        ## Import
        Log Analytics Linked Storage Accounts can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:loganalytics/linkedStorageAccount:LinkedStorageAccount example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/group1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedStorageAccounts/{dataSourceType}
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] data_source_type: The data source type which should be used for this Log Analytics Linked Storage Account. Possible values are "customlogs", "azurewatson", "query", "Ingestion" and "alerts". Changing this forces a new Log Analytics Linked Storage Account to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Linked Storage Account should exist. Changing this forces a new Log Analytics Linked Storage Account to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] storage_account_ids: The storage account resource ids to be linked.
        :param pulumi.Input[str] workspace_resource_id: The resource ID of the Log Analytics Workspace. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: LinkedStorageAccountArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Log Analytics Linked Storage Account.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="GRS")
        example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            sku="PerGB2018")
        example_linked_storage_account = azure.loganalytics.LinkedStorageAccount("exampleLinkedStorageAccount",
            data_source_type="customlogs",
            resource_group_name=example_resource_group.name,
            workspace_resource_id=example_analytics_workspace.id,
            storage_account_ids=[example_account.id])
        ```
        ## Import
        Log Analytics Linked Storage Accounts can be imported using the `resource id`, e.g.
        ```sh
        $ pulumi import azure:loganalytics/linkedStorageAccount:LinkedStorageAccount example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/group1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedStorageAccounts/{dataSourceType}
        ```
        :param str resource_name: The name of the resource.
        :param LinkedStorageAccountArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        """Dispatch between the two overloads above.

        ``get_resource_args_opts`` returns a ``LinkedStorageAccountArgs``
        instance when the args-object overload was used, else ``None``.
        """
        resource_args, opts = _utilities.get_resource_args_opts(LinkedStorageAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand the args instance into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword form: forward the caller's arguments untouched.
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 data_source_type: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 storage_account_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 workspace_resource_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """Shared constructor body: validate options, build the property bag,
        and register the resource with the Pulumi engine.
        """
        # Normalize and validate resource options before anything else.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id means a brand-new resource is being created, so
            # __props__ (used only when rehydrating via get()) must be absent.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = LinkedStorageAccountArgs.__new__(LinkedStorageAccountArgs)
            # Each property is required unless opts.urn is set, in which case
            # the engine looks the values up from existing state.
            if data_source_type is None and not opts.urn:
                raise TypeError("Missing required property 'data_source_type'")
            __props__.__dict__["data_source_type"] = data_source_type
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if storage_account_ids is None and not opts.urn:
                raise TypeError("Missing required property 'storage_account_ids'")
            __props__.__dict__["storage_account_ids"] = storage_account_ids
            if workspace_resource_id is None and not opts.urn:
                raise TypeError("Missing required property 'workspace_resource_id'")
            __props__.__dict__["workspace_resource_id"] = workspace_resource_id
        # Register the resource under its fully-qualified Pulumi type token.
        super(LinkedStorageAccount, __self__).__init__(
            'azure:loganalytics/linkedStorageAccount:LinkedStorageAccount',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            data_source_type: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            storage_account_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            workspace_resource_id: Optional[pulumi.Input[str]] = None) -> 'LinkedStorageAccount':
        """
        Get an existing LinkedStorageAccount resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] data_source_type: The data source type which should be used for this Log Analytics Linked Storage Account. Possible values are "customlogs", "azurewatson", "query", "Ingestion" and "alerts". Changing this forces a new Log Analytics Linked Storage Account to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Linked Storage Account should exist. Changing this forces a new Log Analytics Linked Storage Account to be created.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] storage_account_ids: The storage account resource ids to be linked.
        :param pulumi.Input[str] workspace_resource_id: The resource ID of the Log Analytics Workspace. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        # Merging id into opts routes construction through the rehydration
        # path of _internal_init (opts.id is not None).
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _LinkedStorageAccountState.__new__(_LinkedStorageAccountState)
        __props__.__dict__["data_source_type"] = data_source_type
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["storage_account_ids"] = storage_account_ids
        __props__.__dict__["workspace_resource_id"] = workspace_resource_id
        return LinkedStorageAccount(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="dataSourceType")
    def data_source_type(self) -> pulumi.Output[str]:
        """
        The data source type which should be used for this Log Analytics Linked Storage Account. Possible values are "customlogs", "azurewatson", "query", "Ingestion" and "alerts". Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "data_source_type")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the Resource Group where the Log Analytics Linked Storage Account should exist. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter(name="storageAccountIds")
    def storage_account_ids(self) -> pulumi.Output[Sequence[str]]:
        """
        The storage account resource ids to be linked.
        """
        return pulumi.get(self, "storage_account_ids")

    @property
    @pulumi.getter(name="workspaceResourceId")
    def workspace_resource_id(self) -> pulumi.Output[str]:
        """
        The resource ID of the Log Analytics Workspace. Changing this forces a new Log Analytics Linked Storage Account to be created.
        """
        return pulumi.get(self, "workspace_resource_id")
| 54.346479
| 302
| 0.701394
| 2,286
| 19,293
| 5.669729
| 0.08399
| 0.088573
| 0.058329
| 0.075226
| 0.863282
| 0.844997
| 0.829257
| 0.821927
| 0.799167
| 0.78389
| 0
| 0.005089
| 0.215778
| 19,293
| 354
| 303
| 54.5
| 0.851553
| 0.450008
| 0
| 0.564972
| 1
| 0
| 0.139767
| 0.03515
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152542
| false
| 0.00565
| 0.028249
| 0
| 0.271186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc7456a5a6a65b3122cf25e30e244992653fc013
| 50,247
|
py
|
Python
|
.c9/metadata/environment/views.py
|
matthewrr/MASS-Updates
|
3c499927a8ecd14d6febac688affeffecc3b2247
|
[
"MIT"
] | null | null | null |
.c9/metadata/environment/views.py
|
matthewrr/MASS-Updates
|
3c499927a8ecd14d6febac688affeffecc3b2247
|
[
"MIT"
] | null | null | null |
.c9/metadata/environment/views.py
|
matthewrr/MASS-Updates
|
3c499927a8ecd14d6febac688affeffecc3b2247
|
[
"MIT"
] | null | null | null |
{"filter":false,"title":"views.py","tooltip":"/views.py","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":12,"column":12},"end":{"row":12,"column":13},"action":"remove","lines":["n"],"id":227},{"start":{"row":12,"column":11},"end":{"row":12,"column":12},"action":"remove","lines":["w"]},{"start":{"row":12,"column":10},"end":{"row":12,"column":11},"action":"remove","lines":["o"]},{"start":{"row":12,"column":9},"end":{"row":12,"column":10},"action":"remove","lines":["n"]},{"start":{"row":12,"column":8},"end":{"row":12,"column":9},"action":"remove","lines":["k"]},{"start":{"row":12,"column":7},"end":{"row":12,"column":8},"action":"remove","lines":["n"]},{"start":{"row":12,"column":6},"end":{"row":12,"column":7},"action":"remove","lines":["u"]},{"start":{"row":12,"column":5},"end":{"row":12,"column":6},"action":"remove","lines":["'"]}],[{"start":{"row":12,"column":5},"end":{"row":12,"column":6},"action":"insert","lines":["u"],"id":228},{"start":{"row":12,"column":6},"end":{"row":12,"column":7},"action":"insert","lines":["n"]},{"start":{"row":12,"column":7},"end":{"row":12,"column":8},"action":"insert","lines":["k"]},{"start":{"row":12,"column":8},"end":{"row":12,"column":9},"action":"insert","lines":["n"]},{"start":{"row":12,"column":9},"end":{"row":12,"column":10},"action":"insert","lines":["o"]},{"start":{"row":12,"column":10},"end":{"row":12,"column":11},"action":"insert","lines":["w"]},{"start":{"row":12,"column":11},"end":{"row":12,"column":12},"action":"insert","lines":["n"]},{"start":{"row":12,"column":12},"end":{"row":12,"column":13},"action":"insert","lines":["_"]},{"start":{"row":12,"column":13},"end":{"row":12,"column":14},"action":"insert","lines":["n"]},{"start":{"row":12,"column":14},"end":{"row":12,"column":15},"action":"insert","lines":["u"]},{"start":{"row":12,"column":15},"end":{"row":12,"column":16},"action":"insert","lines":["m"]}],[{"start":{"row":12,"column":16},"end":{"row":12,"column":17},"action":"insert","lines":["b"],"id":229}
,{"start":{"row":12,"column":17},"end":{"row":12,"column":18},"action":"insert","lines":["e"]},{"start":{"row":12,"column":18},"end":{"row":12,"column":19},"action":"insert","lines":["r"]},{"start":{"row":12,"column":19},"end":{"row":12,"column":20},"action":"insert","lines":["'"]}],[{"start":{"row":12,"column":20},"end":{"row":12,"column":21},"action":"insert","lines":[":"],"id":230}],[{"start":{"row":12,"column":21},"end":{"row":12,"column":22},"action":"insert","lines":[" "],"id":231}],[{"start":{"row":12,"column":22},"end":{"row":12,"column":24},"action":"insert","lines":["\"\""],"id":232}],[{"start":{"row":12,"column":23},"end":{"row":12,"column":24},"action":"insert","lines":["T"],"id":233},{"start":{"row":12,"column":24},"end":{"row":12,"column":25},"action":"insert","lines":["h"]},{"start":{"row":12,"column":25},"end":{"row":12,"column":26},"action":"insert","lines":["i"]},{"start":{"row":12,"column":26},"end":{"row":12,"column":27},"action":"insert","lines":["s"]}],[{"start":{"row":12,"column":27},"end":{"row":12,"column":28},"action":"insert","lines":[" "],"id":234}],[{"start":{"row":12,"column":27},"end":{"row":12,"column":28},"action":"remove","lines":[" 
"],"id":235},{"start":{"row":12,"column":26},"end":{"row":12,"column":27},"action":"remove","lines":["s"]},{"start":{"row":12,"column":25},"end":{"row":12,"column":26},"action":"remove","lines":["i"]},{"start":{"row":12,"column":24},"end":{"row":12,"column":25},"action":"remove","lines":["h"]},{"start":{"row":12,"column":23},"end":{"row":12,"column":24},"action":"remove","lines":["T"]}],[{"start":{"row":12,"column":23},"end":{"row":12,"column":24},"action":"insert","lines":["E"],"id":236},{"start":{"row":12,"column":24},"end":{"row":12,"column":25},"action":"insert","lines":["r"]},{"start":{"row":12,"column":25},"end":{"row":12,"column":26},"action":"insert","lines":["r"]},{"start":{"row":12,"column":26},"end":{"row":12,"column":27},"action":"insert","lines":["o"]},{"start":{"row":12,"column":27},"end":{"row":12,"column":28},"action":"insert","lines":["r"]},{"start":{"row":12,"column":28},"end":{"row":12,"column":29},"action":"insert","lines":[":"]}],[{"start":{"row":12,"column":29},"end":{"row":12,"column":30},"action":"insert","lines":[" "],"id":237},{"start":{"row":12,"column":30},"end":{"row":12,"column":31},"action":"insert","lines":["t"]},{"start":{"row":12,"column":31},"end":{"row":12,"column":32},"action":"insert","lines":["h"]},{"start":{"row":12,"column":32},"end":{"row":12,"column":33},"action":"insert","lines":["i"]},{"start":{"row":12,"column":33},"end":{"row":12,"column":34},"action":"insert","lines":["s"]}],[{"start":{"row":12,"column":34},"end":{"row":12,"column":35},"action":"insert","lines":[" 
"],"id":238},{"start":{"row":12,"column":35},"end":{"row":12,"column":36},"action":"insert","lines":["n"]},{"start":{"row":12,"column":36},"end":{"row":12,"column":37},"action":"insert","lines":["u"]},{"start":{"row":12,"column":37},"end":{"row":12,"column":38},"action":"insert","lines":["m"]}],[{"start":{"row":12,"column":37},"end":{"row":12,"column":38},"action":"remove","lines":["m"],"id":239},{"start":{"row":12,"column":36},"end":{"row":12,"column":37},"action":"remove","lines":["u"]},{"start":{"row":12,"column":35},"end":{"row":12,"column":36},"action":"remove","lines":["n"]}],[{"start":{"row":12,"column":35},"end":{"row":12,"column":36},"action":"insert","lines":["p"],"id":240},{"start":{"row":12,"column":36},"end":{"row":12,"column":37},"action":"insert","lines":["h"]},{"start":{"row":12,"column":37},"end":{"row":12,"column":38},"action":"insert","lines":["o"]},{"start":{"row":12,"column":38},"end":{"row":12,"column":39},"action":"insert","lines":["n"]},{"start":{"row":12,"column":39},"end":{"row":12,"column":40},"action":"insert","lines":["e"]}],[{"start":{"row":12,"column":40},"end":{"row":12,"column":41},"action":"insert","lines":[" "],"id":241},{"start":{"row":12,"column":41},"end":{"row":12,"column":42},"action":"insert","lines":["n"]},{"start":{"row":12,"column":42},"end":{"row":12,"column":43},"action":"insert","lines":["u"]},{"start":{"row":12,"column":43},"end":{"row":12,"column":44},"action":"insert","lines":["m"]},{"start":{"row":12,"column":44},"end":{"row":12,"column":45},"action":"insert","lines":["b"]},{"start":{"row":12,"column":45},"end":{"row":12,"column":46},"action":"insert","lines":["e"]},{"start":{"row":12,"column":46},"end":{"row":12,"column":47},"action":"insert","lines":["r"]}],[{"start":{"row":12,"column":47},"end":{"row":12,"column":48},"action":"insert","lines":[" 
"],"id":242},{"start":{"row":12,"column":48},"end":{"row":12,"column":49},"action":"insert","lines":["i"]},{"start":{"row":12,"column":49},"end":{"row":12,"column":50},"action":"insert","lines":["s"]}],[{"start":{"row":12,"column":50},"end":{"row":12,"column":51},"action":"insert","lines":[" "],"id":243},{"start":{"row":12,"column":51},"end":{"row":12,"column":52},"action":"insert","lines":["n"]},{"start":{"row":12,"column":52},"end":{"row":12,"column":53},"action":"insert","lines":["o"]},{"start":{"row":12,"column":53},"end":{"row":12,"column":54},"action":"insert","lines":["t"]}],[{"start":{"row":12,"column":54},"end":{"row":12,"column":55},"action":"insert","lines":[" "],"id":244},{"start":{"row":12,"column":55},"end":{"row":12,"column":56},"action":"insert","lines":["a"]},{"start":{"row":12,"column":56},"end":{"row":12,"column":57},"action":"insert","lines":["s"]},{"start":{"row":12,"column":57},"end":{"row":12,"column":58},"action":"insert","lines":["s"]},{"start":{"row":12,"column":58},"end":{"row":12,"column":59},"action":"insert","lines":["o"]},{"start":{"row":12,"column":59},"end":{"row":12,"column":60},"action":"insert","lines":["c"]},{"start":{"row":12,"column":60},"end":{"row":12,"column":61},"action":"insert","lines":["i"]},{"start":{"row":12,"column":61},"end":{"row":12,"column":62},"action":"insert","lines":["a"]},{"start":{"row":12,"column":62},"end":{"row":12,"column":63},"action":"insert","lines":["t"]},{"start":{"row":12,"column":63},"end":{"row":12,"column":64},"action":"insert","lines":["e"]},{"start":{"row":12,"column":64},"end":{"row":12,"column":65},"action":"insert","lines":["d"]}],[{"start":{"row":12,"column":65},"end":{"row":12,"column":66},"action":"insert","lines":[" 
"],"id":245},{"start":{"row":12,"column":66},"end":{"row":12,"column":67},"action":"insert","lines":["w"]},{"start":{"row":12,"column":67},"end":{"row":12,"column":68},"action":"insert","lines":["i"]},{"start":{"row":12,"column":68},"end":{"row":12,"column":69},"action":"insert","lines":["t"]},{"start":{"row":12,"column":69},"end":{"row":12,"column":70},"action":"insert","lines":["h"]}],[{"start":{"row":12,"column":70},"end":{"row":12,"column":71},"action":"insert","lines":[" "],"id":246},{"start":{"row":12,"column":71},"end":{"row":12,"column":72},"action":"insert","lines":["a"]},{"start":{"row":12,"column":72},"end":{"row":12,"column":73},"action":"insert","lines":["n"]}],[{"start":{"row":12,"column":73},"end":{"row":12,"column":74},"action":"insert","lines":[" "],"id":247},{"start":{"row":12,"column":74},"end":{"row":12,"column":75},"action":"insert","lines":["a"]},{"start":{"row":12,"column":75},"end":{"row":12,"column":76},"action":"insert","lines":["c"]},{"start":{"row":12,"column":76},"end":{"row":12,"column":77},"action":"insert","lines":["t"]},{"start":{"row":12,"column":77},"end":{"row":12,"column":78},"action":"insert","lines":["i"]},{"start":{"row":12,"column":78},"end":{"row":12,"column":79},"action":"insert","lines":["v"]},{"start":{"row":12,"column":79},"end":{"row":12,"column":80},"action":"insert","lines":["e"]}],[{"start":{"row":12,"column":80},"end":{"row":12,"column":81},"action":"insert","lines":[" 
"],"id":248},{"start":{"row":12,"column":81},"end":{"row":12,"column":82},"action":"insert","lines":["e"]},{"start":{"row":12,"column":82},"end":{"row":12,"column":83},"action":"insert","lines":["m"]},{"start":{"row":12,"column":83},"end":{"row":12,"column":84},"action":"insert","lines":["p"]},{"start":{"row":12,"column":84},"end":{"row":12,"column":85},"action":"insert","lines":["l"]},{"start":{"row":12,"column":85},"end":{"row":12,"column":86},"action":"insert","lines":["o"]},{"start":{"row":12,"column":86},"end":{"row":12,"column":87},"action":"insert","lines":["y"]},{"start":{"row":12,"column":87},"end":{"row":12,"column":88},"action":"insert","lines":["e"]},{"start":{"row":12,"column":88},"end":{"row":12,"column":89},"action":"insert","lines":["e"]},{"start":{"row":12,"column":89},"end":{"row":12,"column":90},"action":"insert","lines":["."]}],[{"start":{"row":12,"column":90},"end":{"row":12,"column":91},"action":"insert","lines":[" "],"id":249}],[{"start":{"row":12,"column":91},"end":{"row":12,"column":92},"action":"insert","lines":["P"],"id":250},{"start":{"row":12,"column":92},"end":{"row":12,"column":93},"action":"insert","lines":["l"]},{"start":{"row":12,"column":93},"end":{"row":12,"column":94},"action":"insert","lines":["e"]},{"start":{"row":12,"column":94},"end":{"row":12,"column":95},"action":"insert","lines":["a"]},{"start":{"row":12,"column":95},"end":{"row":12,"column":96},"action":"insert","lines":["s"]},{"start":{"row":12,"column":96},"end":{"row":12,"column":97},"action":"insert","lines":["e"]}],[{"start":{"row":12,"column":97},"end":{"row":12,"column":98},"action":"insert","lines":[" 
"],"id":251},{"start":{"row":12,"column":98},"end":{"row":12,"column":99},"action":"insert","lines":["r"]},{"start":{"row":12,"column":99},"end":{"row":12,"column":100},"action":"insert","lines":["e"]},{"start":{"row":12,"column":100},"end":{"row":12,"column":101},"action":"insert","lines":["a"]},{"start":{"row":12,"column":101},"end":{"row":12,"column":102},"action":"insert","lines":["c"]},{"start":{"row":12,"column":102},"end":{"row":12,"column":103},"action":"insert","lines":["h"]}],[{"start":{"row":12,"column":103},"end":{"row":12,"column":104},"action":"insert","lines":[" "],"id":252},{"start":{"row":12,"column":104},"end":{"row":12,"column":105},"action":"insert","lines":["o"]},{"start":{"row":12,"column":105},"end":{"row":12,"column":106},"action":"insert","lines":["p"]},{"start":{"row":12,"column":106},"end":{"row":12,"column":107},"action":"insert","lines":["u"]},{"start":{"row":12,"column":107},"end":{"row":12,"column":108},"action":"insert","lines":["t"]}],[{"start":{"row":12,"column":107},"end":{"row":12,"column":108},"action":"remove","lines":["t"],"id":253},{"start":{"row":12,"column":106},"end":{"row":12,"column":107},"action":"remove","lines":["u"]},{"start":{"row":12,"column":105},"end":{"row":12,"column":106},"action":"remove","lines":["p"]}],[{"start":{"row":12,"column":105},"end":{"row":12,"column":106},"action":"insert","lines":["o"],"id":254},{"start":{"row":12,"column":106},"end":{"row":12,"column":107},"action":"insert","lines":["u"]}],[{"start":{"row":12,"column":106},"end":{"row":12,"column":107},"action":"remove","lines":["u"],"id":255},{"start":{"row":12,"column":105},"end":{"row":12,"column":106},"action":"remove","lines":["o"]}],[{"start":{"row":12,"column":105},"end":{"row":12,"column":106},"action":"insert","lines":["u"],"id":256},{"start":{"row":12,"column":106},"end":{"row":12,"column":107},"action":"insert","lines":["t"]}],[{"start":{"row":12,"column":107},"end":{"row":12,"column":108},"action":"insert","lines":[" 
"],"id":257},{"start":{"row":12,"column":108},"end":{"row":12,"column":109},"action":"insert","lines":["t"]},{"start":{"row":12,"column":109},"end":{"row":12,"column":110},"action":"insert","lines":["o"]}],[{"start":{"row":12,"column":110},"end":{"row":12,"column":111},"action":"insert","lines":[" "],"id":258},{"start":{"row":12,"column":111},"end":{"row":12,"column":112},"action":"insert","lines":["y"]},{"start":{"row":12,"column":112},"end":{"row":12,"column":113},"action":"insert","lines":["o"]},{"start":{"row":12,"column":113},"end":{"row":12,"column":114},"action":"insert","lines":["u"]},{"start":{"row":12,"column":114},"end":{"row":12,"column":115},"action":"insert","lines":["r"]}],[{"start":{"row":12,"column":115},"end":{"row":12,"column":116},"action":"insert","lines":[" "],"id":259},{"start":{"row":12,"column":116},"end":{"row":12,"column":117},"action":"insert","lines":["m"]},{"start":{"row":12,"column":117},"end":{"row":12,"column":118},"action":"insert","lines":["a"]},{"start":{"row":12,"column":118},"end":{"row":12,"column":119},"action":"insert","lines":["n"]},{"start":{"row":12,"column":119},"end":{"row":12,"column":120},"action":"insert","lines":["a"]},{"start":{"row":12,"column":120},"end":{"row":12,"column":121},"action":"insert","lines":["g"]},{"start":{"row":12,"column":121},"end":{"row":12,"column":122},"action":"insert","lines":["e"]},{"start":{"row":12,"column":122},"end":{"row":12,"column":123},"action":"insert","lines":["r"]}],[{"start":{"row":12,"column":123},"end":{"row":12,"column":124},"action":"insert","lines":[" "],"id":260},{"start":{"row":12,"column":124},"end":{"row":12,"column":125},"action":"insert","lines":["i"]},{"start":{"row":12,"column":125},"end":{"row":12,"column":126},"action":"insert","lines":["f"]}],[{"start":{"row":12,"column":126},"end":{"row":12,"column":127},"action":"insert","lines":[" 
"],"id":261},{"start":{"row":12,"column":127},"end":{"row":12,"column":128},"action":"insert","lines":["y"]},{"start":{"row":12,"column":128},"end":{"row":12,"column":129},"action":"insert","lines":["o"]},{"start":{"row":12,"column":129},"end":{"row":12,"column":130},"action":"insert","lines":["u"]}],[{"start":{"row":12,"column":130},"end":{"row":12,"column":131},"action":"insert","lines":[" "],"id":262},{"start":{"row":12,"column":131},"end":{"row":12,"column":132},"action":"insert","lines":["b"]},{"start":{"row":12,"column":132},"end":{"row":12,"column":133},"action":"insert","lines":["e"]},{"start":{"row":12,"column":133},"end":{"row":12,"column":134},"action":"insert","lines":["l"]},{"start":{"row":12,"column":134},"end":{"row":12,"column":135},"action":"insert","lines":["i"]},{"start":{"row":12,"column":135},"end":{"row":12,"column":136},"action":"insert","lines":["e"]},{"start":{"row":12,"column":136},"end":{"row":12,"column":137},"action":"insert","lines":["v"]},{"start":{"row":12,"column":137},"end":{"row":12,"column":138},"action":"insert","lines":["e"]}],[{"start":{"row":12,"column":138},"end":{"row":12,"column":139},"action":"insert","lines":[" "],"id":263},{"start":{"row":12,"column":139},"end":{"row":12,"column":140},"action":"insert","lines":["t"]},{"start":{"row":12,"column":140},"end":{"row":12,"column":141},"action":"insert","lines":["h"]},{"start":{"row":12,"column":141},"end":{"row":12,"column":142},"action":"insert","lines":["i"]},{"start":{"row":12,"column":142},"end":{"row":12,"column":143},"action":"insert","lines":["s"]}],[{"start":{"row":12,"column":143},"end":{"row":12,"column":144},"action":"insert","lines":[" "],"id":264},{"start":{"row":12,"column":144},"end":{"row":12,"column":145},"action":"insert","lines":["i"]},{"start":{"row":12,"column":145},"end":{"row":12,"column":146},"action":"insert","lines":["s"]}],[{"start":{"row":12,"column":146},"end":{"row":12,"column":147},"action":"insert","lines":[" 
"],"id":265},{"start":{"row":12,"column":147},"end":{"row":12,"column":148},"action":"insert","lines":["a"]},{"start":{"row":12,"column":148},"end":{"row":12,"column":149},"action":"insert","lines":["n"]}],[{"start":{"row":12,"column":149},"end":{"row":12,"column":150},"action":"insert","lines":[" "],"id":266},{"start":{"row":12,"column":150},"end":{"row":12,"column":151},"action":"insert","lines":["e"]},{"start":{"row":12,"column":151},"end":{"row":12,"column":152},"action":"insert","lines":["r"]},{"start":{"row":12,"column":152},"end":{"row":12,"column":153},"action":"insert","lines":["r"]},{"start":{"row":12,"column":153},"end":{"row":12,"column":154},"action":"insert","lines":["o"]},{"start":{"row":12,"column":154},"end":{"row":12,"column":155},"action":"insert","lines":["r"]},{"start":{"row":12,"column":155},"end":{"row":12,"column":156},"action":"insert","lines":["."]}],[{"start":{"row":8,"column":0},"end":{"row":8,"column":8},"action":"remove","lines":["MESSAGES"],"id":267},{"start":{"row":8,"column":0},"end":{"row":8,"column":1},"action":"insert","lines":["r"]},{"start":{"row":8,"column":1},"end":{"row":8,"column":2},"action":"insert","lines":["e"]},{"start":{"row":8,"column":2},"end":{"row":8,"column":3},"action":"insert","lines":["s"]},{"start":{"row":8,"column":3},"end":{"row":8,"column":4},"action":"insert","lines":["p"]},{"start":{"row":8,"column":4},"end":{"row":8,"column":5},"action":"insert","lines":["o"]},{"start":{"row":8,"column":5},"end":{"row":8,"column":6},"action":"insert","lines":["n"]},{"start":{"row":8,"column":6},"end":{"row":8,"column":7},"action":"insert","lines":["s"]},{"start":{"row":8,"column":7},"end":{"row":8,"column":8},"action":"insert","lines":["e"]},{"start":{"row":8,"column":8},"end":{"row":8,"column":9},"action":"insert","lines":["s"]}],[{"start":{"row":55,"column":17},"end":{"row":55,"column":43},"action":"remove","lines":["'Error: number not 
found.'"],"id":268},{"start":{"row":55,"column":17},"end":{"row":55,"column":18},"action":"insert","lines":["r"]},{"start":{"row":55,"column":18},"end":{"row":55,"column":19},"action":"insert","lines":["e"]},{"start":{"row":55,"column":19},"end":{"row":55,"column":20},"action":"insert","lines":["s"]},{"start":{"row":55,"column":20},"end":{"row":55,"column":21},"action":"insert","lines":["p"]},{"start":{"row":55,"column":21},"end":{"row":55,"column":22},"action":"insert","lines":["o"]},{"start":{"row":55,"column":22},"end":{"row":55,"column":23},"action":"insert","lines":["n"]},{"start":{"row":55,"column":23},"end":{"row":55,"column":24},"action":"insert","lines":["s"]},{"start":{"row":55,"column":24},"end":{"row":55,"column":25},"action":"insert","lines":["e"]},{"start":{"row":55,"column":25},"end":{"row":55,"column":26},"action":"insert","lines":["s"]}],[{"start":{"row":55,"column":26},"end":{"row":55,"column":28},"action":"insert","lines":["[]"],"id":269}],[{"start":{"row":55,"column":27},"end":{"row":55,"column":29},"action":"insert","lines":["''"],"id":270}],[{"start":{"row":55,"column":28},"end":{"row":55,"column":29},"action":"insert","lines":["u"],"id":271},{"start":{"row":55,"column":29},"end":{"row":55,"column":30},"action":"insert","lines":["n"]},{"start":{"row":55,"column":30},"end":{"row":55,"column":31},"action":"insert","lines":["k"]},{"start":{"row":55,"column":31},"end":{"row":55,"column":32},"action":"insert","lines":["n"]},{"start":{"row":55,"column":32},"end":{"row":55,"column":33},"action":"insert","lines":["o"]},{"start":{"row":55,"column":33},"end":{"row":55,"column":34},"action":"insert","lines":["w"]},{"start":{"row":55,"column":34},"end":{"row":55,"column":35},"action":"insert","lines":["n"]},{"start":{"row":55,"column":35},"end":{"row":55,"column":36},"action":"insert","lines":["_"]},{"start":{"row":55,"column":36},"end":{"row":55,"column":37},"action":"insert","lines":["n"]},{"start":{"row":55,"column":37},"end":{"row":55,"column":38},"act
ion":"insert","lines":["u"]}],[{"start":{"row":55,"column":38},"end":{"row":55,"column":39},"action":"insert","lines":["m"],"id":272},{"start":{"row":55,"column":39},"end":{"row":55,"column":40},"action":"insert","lines":["b"]},{"start":{"row":55,"column":40},"end":{"row":55,"column":41},"action":"insert","lines":["e"]},{"start":{"row":55,"column":41},"end":{"row":55,"column":42},"action":"insert","lines":["r"]}],[{"start":{"row":49,"column":25},"end":{"row":49,"column":56},"action":"remove","lines":["'Error: not a correct command.'"],"id":273},{"start":{"row":49,"column":25},"end":{"row":49,"column":26},"action":"insert","lines":["r"]},{"start":{"row":49,"column":26},"end":{"row":49,"column":27},"action":"insert","lines":["e"]},{"start":{"row":49,"column":27},"end":{"row":49,"column":28},"action":"insert","lines":["s"]},{"start":{"row":49,"column":28},"end":{"row":49,"column":29},"action":"insert","lines":["p"]},{"start":{"row":49,"column":29},"end":{"row":49,"column":30},"action":"insert","lines":["o"]},{"start":{"row":49,"column":30},"end":{"row":49,"column":31},"action":"insert","lines":["n"]},{"start":{"row":49,"column":31},"end":{"row":49,"column":32},"action":"insert","lines":["s"]},{"start":{"row":49,"column":32},"end":{"row":49,"column":33},"action":"insert","lines":["e"]},{"start":{"row":49,"column":33},"end":{"row":49,"column":34},"action":"insert","lines":["s"]}],[{"start":{"row":49,"column":34},"end":{"row":49,"column":36},"action":"insert","lines":["[]"],"id":275}],[{"start":{"row":49,"column":35},"end":{"row":49,"column":61},"action":"insert","lines":["'employee_unknown_command'"],"id":276}],[{"start":{"row":40,"column":26},"end":{"row":40,"column":69},"action":"remove","lines":["Error: you must begin text with 
\"message\".'"],"id":277},{"start":{"row":40,"column":26},"end":{"row":40,"column":27},"action":"insert","lines":["r"]},{"start":{"row":40,"column":27},"end":{"row":40,"column":28},"action":"insert","lines":["e"]},{"start":{"row":40,"column":28},"end":{"row":40,"column":29},"action":"insert","lines":["s"]},{"start":{"row":40,"column":29},"end":{"row":40,"column":30},"action":"insert","lines":["p"]},{"start":{"row":40,"column":30},"end":{"row":40,"column":31},"action":"insert","lines":["o"]},{"start":{"row":40,"column":31},"end":{"row":40,"column":32},"action":"insert","lines":["n"]},{"start":{"row":40,"column":32},"end":{"row":40,"column":33},"action":"insert","lines":["s"]},{"start":{"row":40,"column":33},"end":{"row":40,"column":34},"action":"insert","lines":["e"]},{"start":{"row":40,"column":34},"end":{"row":40,"column":35},"action":"insert","lines":["s"]}],[{"start":{"row":40,"column":34},"end":{"row":40,"column":35},"action":"remove","lines":["s"],"id":278},{"start":{"row":40,"column":33},"end":{"row":40,"column":34},"action":"remove","lines":["e"]},{"start":{"row":40,"column":32},"end":{"row":40,"column":33},"action":"remove","lines":["s"]},{"start":{"row":40,"column":31},"end":{"row":40,"column":32},"action":"remove","lines":["n"]},{"start":{"row":40,"column":30},"end":{"row":40,"column":31},"action":"remove","lines":["o"]},{"start":{"row":40,"column":29},"end":{"row":40,"column":30},"action":"remove","lines":["p"]},{"start":{"row":40,"column":28},"end":{"row":40,"column":29},"action":"remove","lines":["s"]},{"start":{"row":40,"column":27},"end":{"row":40,"column":28},"action":"remove","lines":["e"]},{"start":{"row":40,"column":26},"end":{"row":40,"column":27},"action":"remove","lines":["r"]},{"start":{"row":40,"column":25},"end":{"row":40,"column":26},"action":"remove","lines":["'"]}],[{"start":{"row":40,"column":25},"end":{"row":40,"column":26},"action":"insert","lines":["r"],"id":279},{"start":{"row":40,"column":26},"end":{"row":40,"column":27},"action":"i
nsert","lines":["e"]},{"start":{"row":40,"column":27},"end":{"row":40,"column":28},"action":"insert","lines":["s"]},{"start":{"row":40,"column":28},"end":{"row":40,"column":29},"action":"insert","lines":["p"]},{"start":{"row":40,"column":29},"end":{"row":40,"column":30},"action":"insert","lines":["o"]},{"start":{"row":40,"column":30},"end":{"row":40,"column":31},"action":"insert","lines":["n"]},{"start":{"row":40,"column":31},"end":{"row":40,"column":32},"action":"insert","lines":["s"]},{"start":{"row":40,"column":32},"end":{"row":40,"column":33},"action":"insert","lines":["e"]},{"start":{"row":40,"column":33},"end":{"row":40,"column":34},"action":"insert","lines":["s"]}],[{"start":{"row":40,"column":34},"end":{"row":40,"column":36},"action":"insert","lines":["[]"],"id":280}],[{"start":{"row":40,"column":35},"end":{"row":40,"column":37},"action":"insert","lines":["''"],"id":281}],[{"start":{"row":40,"column":36},"end":{"row":40,"column":37},"action":"insert","lines":["a"],"id":282},{"start":{"row":40,"column":37},"end":{"row":40,"column":38},"action":"insert","lines":["d"]},{"start":{"row":40,"column":38},"end":{"row":40,"column":39},"action":"insert","lines":["m"]},{"start":{"row":40,"column":39},"end":{"row":40,"column":40},"action":"insert","lines":["i"]},{"start":{"row":40,"column":40},"end":{"row":40,"column":41},"action":"insert","lines":["n"]},{"start":{"row":40,"column":41},"end":{"row":40,"column":42},"action":"insert","lines":["_"]},{"start":{"row":40,"column":42},"end":{"row":40,"column":43},"action":"insert","lines":["k"]},{"start":{"row":40,"column":43},"end":{"row":40,"column":44},"action":"insert","lines":["n"]},{"start":{"row":40,"column":44},"end":{"row":40,"column":45},"action":"insert","lines":["o"]},{"start":{"row":40,"column":45},"end":{"row":40,"column":46},"action":"insert","lines":["w"]},{"start":{"row":40,"column":46},"end":{"row":40,"column":47},"action":"insert","lines":["n"]},{"start":{"row":40,"column":47},"end":{"row":40,"column":48},"a
ction":"insert","lines":["_"]}],[{"start":{"row":40,"column":47},"end":{"row":40,"column":48},"action":"remove","lines":["_"],"id":283},{"start":{"row":40,"column":46},"end":{"row":40,"column":47},"action":"remove","lines":["n"]},{"start":{"row":40,"column":45},"end":{"row":40,"column":46},"action":"remove","lines":["w"]},{"start":{"row":40,"column":44},"end":{"row":40,"column":45},"action":"remove","lines":["o"]},{"start":{"row":40,"column":43},"end":{"row":40,"column":44},"action":"remove","lines":["n"]},{"start":{"row":40,"column":42},"end":{"row":40,"column":43},"action":"remove","lines":["k"]}],[{"start":{"row":40,"column":42},"end":{"row":40,"column":43},"action":"insert","lines":["u"],"id":284},{"start":{"row":40,"column":43},"end":{"row":40,"column":44},"action":"insert","lines":["n"]},{"start":{"row":40,"column":44},"end":{"row":40,"column":45},"action":"insert","lines":["k"]},{"start":{"row":40,"column":45},"end":{"row":40,"column":46},"action":"insert","lines":["n"]},{"start":{"row":40,"column":46},"end":{"row":40,"column":47},"action":"insert","lines":["o"]},{"start":{"row":40,"column":47},"end":{"row":40,"column":48},"action":"insert","lines":["w"]},{"start":{"row":40,"column":48},"end":{"row":40,"column":49},"action":"insert","lines":["n"]}],[{"start":{"row":40,"column":50},"end":{"row":40,"column":51},"action":"insert","lines":["_"],"id":285},{"start":{"row":40,"column":51},"end":{"row":40,"column":52},"action":"insert","lines":["c"]},{"start":{"row":40,"column":52},"end":{"row":40,"column":53},"action":"insert","lines":["o"]},{"start":{"row":40,"column":53},"end":{"row":40,"column":54},"action":"insert","lines":["m"]},{"start":{"row":40,"column":54},"end":{"row":40,"column":55},"action":"insert","lines":["m"]},{"start":{"row":40,"column":55},"end":{"row":40,"column":56},"action":"insert","lines":["a"]},{"start":{"row":40,"column":56},"end":{"row":40,"column":57},"action":"insert","lines":["n"]},{"start":{"row":40,"column":57},"end":{"row":40,"column"
:58},"action":"insert","lines":["d"]}],[{"start":{"row":40,"column":57},"end":{"row":40,"column":58},"action":"remove","lines":["d"],"id":286},{"start":{"row":40,"column":56},"end":{"row":40,"column":57},"action":"remove","lines":["n"]},{"start":{"row":40,"column":55},"end":{"row":40,"column":56},"action":"remove","lines":["a"]},{"start":{"row":40,"column":54},"end":{"row":40,"column":55},"action":"remove","lines":["m"]},{"start":{"row":40,"column":53},"end":{"row":40,"column":54},"action":"remove","lines":["m"]},{"start":{"row":40,"column":52},"end":{"row":40,"column":53},"action":"remove","lines":["o"]},{"start":{"row":40,"column":51},"end":{"row":40,"column":52},"action":"remove","lines":["c"]},{"start":{"row":40,"column":50},"end":{"row":40,"column":51},"action":"remove","lines":["_"]},{"start":{"row":40,"column":49},"end":{"row":40,"column":50},"action":"remove","lines":["'"]}],[{"start":{"row":40,"column":49},"end":{"row":40,"column":50},"action":"insert","lines":["'"],"id":287}],[{"start":{"row":40,"column":49},"end":{"row":40,"column":50},"action":"insert","lines":["_"],"id":288},{"start":{"row":40,"column":50},"end":{"row":40,"column":51},"action":"insert","lines":["c"]},{"start":{"row":40,"column":51},"end":{"row":40,"column":52},"action":"insert","lines":["o"]},{"start":{"row":40,"column":52},"end":{"row":40,"column":53},"action":"insert","lines":["m"]},{"start":{"row":40,"column":53},"end":{"row":40,"column":54},"action":"insert","lines":["m"]},{"start":{"row":40,"column":54},"end":{"row":40,"column":55},"action":"insert","lines":["a"]},{"start":{"row":40,"column":55},"end":{"row":40,"column":56},"action":"insert","lines":["n"]},{"start":{"row":40,"column":56},"end":{"row":40,"column":57},"action":"insert","lines":["d"]}],[{"start":{"row":32,"column":20},"end":{"row":32,"column":24},"action":"remove","lines":[" "],"id":289},{"start":{"row":32,"column":16},"end":{"row":32,"column":20},"action":"remove","lines":[" 
"]},{"start":{"row":32,"column":12},"end":{"row":32,"column":16},"action":"remove","lines":[" "]},{"start":{"row":32,"column":8},"end":{"row":32,"column":12},"action":"remove","lines":[" "]},{"start":{"row":32,"column":4},"end":{"row":32,"column":8},"action":"remove","lines":[" "]},{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"remove","lines":[" "]},{"start":{"row":31,"column":43},"end":{"row":32,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":32,"column":20},"end":{"row":32,"column":24},"action":"remove","lines":[" "],"id":290},{"start":{"row":32,"column":16},"end":{"row":32,"column":20},"action":"remove","lines":[" "]},{"start":{"row":32,"column":12},"end":{"row":32,"column":16},"action":"remove","lines":[" "]},{"start":{"row":32,"column":8},"end":{"row":32,"column":12},"action":"remove","lines":[" "]},{"start":{"row":32,"column":4},"end":{"row":32,"column":8},"action":"remove","lines":[" "]},{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"remove","lines":[" "]},{"start":{"row":31,"column":59},"end":{"row":32,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":32,"column":20},"end":{"row":32,"column":24},"action":"remove","lines":[" "],"id":291},{"start":{"row":32,"column":16},"end":{"row":32,"column":20},"action":"remove","lines":[" "]},{"start":{"row":32,"column":12},"end":{"row":32,"column":16},"action":"remove","lines":[" "]},{"start":{"row":32,"column":8},"end":{"row":32,"column":12},"action":"remove","lines":[" "]},{"start":{"row":32,"column":4},"end":{"row":32,"column":8},"action":"remove","lines":[" "]},{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"remove","lines":[" "]},{"start":{"row":31,"column":79},"end":{"row":32,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":32,"column":16},"end":{"row":32,"column":20},"action":"remove","lines":[" "],"id":292},{"start":{"row":32,"column":12},"end":{"row":32,"column":16},"action":"remove","lines":[" 
"]},{"start":{"row":32,"column":8},"end":{"row":32,"column":12},"action":"remove","lines":[" "]},{"start":{"row":32,"column":4},"end":{"row":32,"column":8},"action":"remove","lines":[" "]},{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"remove","lines":[" "]},{"start":{"row":31,"column":88},"end":{"row":32,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":31,"column":89},"end":{"row":32,"column":0},"action":"insert","lines":["",""],"id":293},{"start":{"row":32,"column":0},"end":{"row":32,"column":20},"action":"insert","lines":[" "]}],[{"start":{"row":32,"column":20},"end":{"row":32,"column":89},"action":"insert","lines":["client.messages.create(to=phone_number,from_=twilio_number,body=body)"],"id":294}],[{"start":{"row":32,"column":57},"end":{"row":32,"column":58},"action":"remove","lines":["r"],"id":295},{"start":{"row":32,"column":56},"end":{"row":32,"column":57},"action":"remove","lines":["e"]},{"start":{"row":32,"column":55},"end":{"row":32,"column":56},"action":"remove","lines":["b"]},{"start":{"row":32,"column":54},"end":{"row":32,"column":55},"action":"remove","lines":["m"]},{"start":{"row":32,"column":53},"end":{"row":32,"column":54},"action":"remove","lines":["u"]},{"start":{"row":32,"column":52},"end":{"row":32,"column":53},"action":"remove","lines":["n"]},{"start":{"row":32,"column":51},"end":{"row":32,"column":52},"action":"remove","lines":["_"]},{"start":{"row":32,"column":50},"end":{"row":32,"column":51},"action":"remove","lines":["e"]},{"start":{"row":32,"column":49},"end":{"row":32,"column":50},"action":"remove","lines":["n"]},{"start":{"row":32,"column":48},"end":{"row":32,"column":49},"action":"remove","lines":["o"]},{"start":{"row":32,"column":47},"end":{"row":32,"column":48},"action":"remove","lines":["h"]},{"start":{"row":32,"column":46},"end":{"row":32,"column":47},"action":"remove","lines":["p"]}],[{"start":{"row":32,"column":46},"end":{"row":32,"column":47},"action":"insert","lines":["s"],"id":296},{"start"
:{"row":32,"column":47},"end":{"row":32,"column":48},"action":"insert","lines":["e"]},{"start":{"row":32,"column":48},"end":{"row":32,"column":49},"action":"insert","lines":["n"]},{"start":{"row":32,"column":49},"end":{"row":32,"column":50},"action":"insert","lines":["d"]},{"start":{"row":32,"column":50},"end":{"row":32,"column":51},"action":"insert","lines":["e"]},{"start":{"row":32,"column":51},"end":{"row":32,"column":52},"action":"insert","lines":["r"]}],[{"start":{"row":9,"column":22},"end":{"row":9,"column":23},"action":"remove","lines":["n"],"id":297},{"start":{"row":9,"column":21},"end":{"row":9,"column":22},"action":"remove","lines":["o"]},{"start":{"row":9,"column":20},"end":{"row":9,"column":21},"action":"remove","lines":["i"]},{"start":{"row":9,"column":19},"end":{"row":9,"column":20},"action":"remove","lines":["t"]},{"start":{"row":9,"column":18},"end":{"row":9,"column":19},"action":"remove","lines":["a"]},{"start":{"row":9,"column":17},"end":{"row":9,"column":18},"action":"remove","lines":["m"]},{"start":{"row":9,"column":16},"end":{"row":9,"column":17},"action":"remove","lines":["r"]},{"start":{"row":9,"column":15},"end":{"row":9,"column":16},"action":"remove","lines":["i"]},{"start":{"row":9,"column":14},"end":{"row":9,"column":15},"action":"remove","lines":["f"]},{"start":{"row":9,"column":13},"end":{"row":9,"column":14},"action":"remove","lines":["n"]},{"start":{"row":9,"column":12},"end":{"row":9,"column":13},"action":"remove","lines":["o"]},{"start":{"row":9,"column":11},"end":{"row":9,"column":12},"action":"remove","lines":["c"]}],[{"start":{"row":9,"column":11},"end":{"row":9,"column":12},"action":"insert","lines":["s"],"id":298},{"start":{"row":9,"column":12},"end":{"row":9,"column":13},"action":"insert","lines":["u"]},{"start":{"row":9,"column":13},"end":{"row":9,"column":14},"action":"insert","lines":["c"]},{"start":{"row":9,"column":14},"end":{"row":9,"column":15},"action":"insert","lines":["c"]},{"start":{"row":9,"column":15},"end":{"row":
9,"column":16},"action":"insert","lines":["e"]},{"start":{"row":9,"column":16},"end":{"row":9,"column":17},"action":"insert","lines":["s"]},{"start":{"row":9,"column":17},"end":{"row":9,"column":18},"action":"insert","lines":["s"]}],[{"start":{"row":32,"column":20},"end":{"row":32,"column":83},"action":"remove","lines":["client.messages.create(to=sender,from_=twilio_number,body=body)"],"id":299},{"start":{"row":32,"column":16},"end":{"row":32,"column":20},"action":"remove","lines":[" "]},{"start":{"row":32,"column":12},"end":{"row":32,"column":16},"action":"remove","lines":[" "]},{"start":{"row":32,"column":8},"end":{"row":32,"column":12},"action":"remove","lines":[" "]},{"start":{"row":32,"column":4},"end":{"row":32,"column":8},"action":"remove","lines":[" "]},{"start":{"row":32,"column":0},"end":{"row":32,"column":4},"action":"remove","lines":[" "]},{"start":{"row":31,"column":89},"end":{"row":32,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":31,"column":89},"end":{"row":32,"column":0},"action":"insert","lines":["",""],"id":300},{"start":{"row":32,"column":0},"end":{"row":32,"column":20},"action":"insert","lines":[" "]}],[{"start":{"row":32,"column":16},"end":{"row":32,"column":20},"action":"remove","lines":[" 
"],"id":301}],[{"start":{"row":32,"column":16},"end":{"row":32,"column":79},"action":"insert","lines":["client.messages.create(to=sender,from_=twilio_number,body=body)"],"id":302}],[{"start":{"row":32,"column":77},"end":{"row":32,"column":78},"action":"remove","lines":["y"],"id":303},{"start":{"row":32,"column":76},"end":{"row":32,"column":77},"action":"remove","lines":["d"]},{"start":{"row":32,"column":75},"end":{"row":32,"column":76},"action":"remove","lines":["o"]},{"start":{"row":32,"column":74},"end":{"row":32,"column":75},"action":"remove","lines":["b"]}],[{"start":{"row":32,"column":74},"end":{"row":32,"column":75},"action":"insert","lines":["m"],"id":304},{"start":{"row":32,"column":75},"end":{"row":32,"column":76},"action":"insert","lines":["e"]},{"start":{"row":32,"column":76},"end":{"row":32,"column":77},"action":"insert","lines":["s"]},{"start":{"row":32,"column":77},"end":{"row":32,"column":78},"action":"insert","lines":["s"]},{"start":{"row":32,"column":78},"end":{"row":32,"column":79},"action":"insert","lines":["a"]},{"start":{"row":32,"column":79},"end":{"row":32,"column":80},"action":"insert","lines":["g"]},{"start":{"row":32,"column":80},"end":{"row":32,"column":81},"action":"insert","lines":["e"]}],[{"start":{"row":32,"column":80},"end":{"row":32,"column":81},"action":"remove","lines":["e"],"id":305},{"start":{"row":32,"column":79},"end":{"row":32,"column":80},"action":"remove","lines":["g"]},{"start":{"row":32,"column":78},"end":{"row":32,"column":79},"action":"remove","lines":["a"]},{"start":{"row":32,"column":77},"end":{"row":32,"column":78},"action":"remove","lines":["s"]},{"start":{"row":32,"column":76},"end":{"row":32,"column":77},"action":"remove","lines":["s"]},{"start":{"row":32,"column":75},"end":{"row":32,"column":76},"action":"remove","lines":["e"]},{"start":{"row":32,"column":74},"end":{"row":32,"column":75},"action":"remove","lines":["m"]}],[{"start":{"row":32,"column":74},"end":{"row":32,"column":75},"action":"insert","lines":["r"],
"id":306},{"start":{"row":32,"column":75},"end":{"row":32,"column":76},"action":"insert","lines":["e"]},{"start":{"row":32,"column":76},"end":{"row":32,"column":77},"action":"insert","lines":["s"]},{"start":{"row":32,"column":77},"end":{"row":32,"column":78},"action":"insert","lines":["p"]},{"start":{"row":32,"column":78},"end":{"row":32,"column":79},"action":"insert","lines":["o"]},{"start":{"row":32,"column":79},"end":{"row":32,"column":80},"action":"insert","lines":["n"]},{"start":{"row":32,"column":80},"end":{"row":32,"column":81},"action":"insert","lines":["s"]},{"start":{"row":32,"column":81},"end":{"row":32,"column":82},"action":"insert","lines":["e"]},{"start":{"row":32,"column":82},"end":{"row":32,"column":83},"action":"insert","lines":["s"]}],[{"start":{"row":32,"column":83},"end":{"row":32,"column":85},"action":"insert","lines":["[]"],"id":307}],[{"start":{"row":32,"column":84},"end":{"row":32,"column":86},"action":"insert","lines":["''"],"id":308}],[{"start":{"row":32,"column":85},"end":{"row":32,"column":86},"action":"insert","lines":["a"],"id":309},{"start":{"row":32,"column":86},"end":{"row":32,"column":87},"action":"insert","lines":["d"]},{"start":{"row":32,"column":87},"end":{"row":32,"column":88},"action":"insert","lines":["m"]},{"start":{"row":32,"column":88},"end":{"row":32,"column":89},"action":"insert","lines":["i"]},{"start":{"row":32,"column":89},"end":{"row":32,"column":90},"action":"insert","lines":["n"]},{"start":{"row":32,"column":90},"end":{"row":32,"column":91},"action":"insert","lines":["_"]},{"start":{"row":32,"column":91},"end":{"row":32,"column":92},"action":"insert","lines":["s"]},{"start":{"row":32,"column":92},"end":{"row":32,"column":93},"action":"insert","lines":["u"]},{"start":{"row":32,"column":93},"end":{"row":32,"column":94},"action":"insert","lines":["c"]},{"start":{"row":32,"column":94},"end":{"row":32,"column":95},"action":"insert","lines":["c"]},{"start":{"row":32,"column":95},"end":{"row":32,"column":96},"action":"inse
rt","lines":["e"]},{"start":{"row":32,"column":96},"end":{"row":32,"column":97},"action":"insert","lines":["s"]},{"start":{"row":32,"column":97},"end":{"row":32,"column":98},"action":"insert","lines":["s"]}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":20},"action":"remove","lines":[" "],"id":310},{"start":{"row":35,"column":12},"end":{"row":35,"column":16},"action":"remove","lines":[" "]},{"start":{"row":35,"column":8},"end":{"row":35,"column":12},"action":"remove","lines":[" "]},{"start":{"row":35,"column":4},"end":{"row":35,"column":8},"action":"remove","lines":[" "]},{"start":{"row":35,"column":0},"end":{"row":35,"column":4},"action":"remove","lines":[" "]},{"start":{"row":34,"column":39},"end":{"row":35,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":20},"action":"remove","lines":[" "],"id":311},{"start":{"row":35,"column":12},"end":{"row":35,"column":16},"action":"remove","lines":[" "]},{"start":{"row":35,"column":8},"end":{"row":35,"column":12},"action":"remove","lines":[" "]},{"start":{"row":35,"column":4},"end":{"row":35,"column":8},"action":"remove","lines":[" "]},{"start":{"row":35,"column":0},"end":{"row":35,"column":4},"action":"remove","lines":[" "]},{"start":{"row":34,"column":49},"end":{"row":35,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":20},"action":"remove","lines":[" "],"id":312},{"start":{"row":35,"column":12},"end":{"row":35,"column":16},"action":"remove","lines":[" "]},{"start":{"row":35,"column":8},"end":{"row":35,"column":12},"action":"remove","lines":[" "]},{"start":{"row":35,"column":4},"end":{"row":35,"column":8},"action":"remove","lines":[" "]},{"start":{"row":35,"column":0},"end":{"row":35,"column":4},"action":"remove","lines":[" 
"]},{"start":{"row":34,"column":69},"end":{"row":35,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":35,"column":12},"end":{"row":35,"column":16},"action":"remove","lines":[" "],"id":313},{"start":{"row":35,"column":8},"end":{"row":35,"column":12},"action":"remove","lines":[" "]},{"start":{"row":35,"column":4},"end":{"row":35,"column":8},"action":"remove","lines":[" "]},{"start":{"row":35,"column":0},"end":{"row":35,"column":4},"action":"remove","lines":[" "]},{"start":{"row":34,"column":108},"end":{"row":35,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":40,"column":16},"end":{"row":40,"column":20},"action":"remove","lines":[" "],"id":314},{"start":{"row":40,"column":12},"end":{"row":40,"column":16},"action":"remove","lines":[" "]},{"start":{"row":40,"column":8},"end":{"row":40,"column":12},"action":"remove","lines":[" "]},{"start":{"row":40,"column":4},"end":{"row":40,"column":8},"action":"remove","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "]},{"start":{"row":39,"column":39},"end":{"row":40,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":40,"column":16},"end":{"row":40,"column":20},"action":"remove","lines":[" "],"id":315},{"start":{"row":40,"column":12},"end":{"row":40,"column":16},"action":"remove","lines":[" "]},{"start":{"row":40,"column":8},"end":{"row":40,"column":12},"action":"remove","lines":[" "]},{"start":{"row":40,"column":4},"end":{"row":40,"column":8},"action":"remove","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "]}],[{"start":{"row":39,"column":49},"end":{"row":40,"column":0},"action":"remove","lines":["",""],"id":316}],[{"start":{"row":40,"column":16},"end":{"row":40,"column":20},"action":"remove","lines":[" "],"id":317},{"start":{"row":40,"column":12},"end":{"row":40,"column":16},"action":"remove","lines":[" 
"]},{"start":{"row":40,"column":8},"end":{"row":40,"column":12},"action":"remove","lines":[" "]},{"start":{"row":40,"column":4},"end":{"row":40,"column":8},"action":"remove","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "]},{"start":{"row":39,"column":69},"end":{"row":40,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":40,"column":12},"end":{"row":40,"column":16},"action":"remove","lines":[" "],"id":318},{"start":{"row":40,"column":8},"end":{"row":40,"column":12},"action":"remove","lines":[" "]},{"start":{"row":40,"column":4},"end":{"row":40,"column":8},"action":"remove","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "]},{"start":{"row":39,"column":111},"end":{"row":40,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":34,"column":12},"end":{"row":34,"column":16},"action":"remove","lines":[" "],"id":319},{"start":{"row":34,"column":8},"end":{"row":34,"column":12},"action":"remove","lines":[" "]},{"start":{"row":34,"column":4},"end":{"row":34,"column":8},"action":"remove","lines":[" "]},{"start":{"row":34,"column":0},"end":{"row":34,"column":4},"action":"remove","lines":[" "]},{"start":{"row":33,"column":17},"end":{"row":34,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":33,"column":17},"end":{"row":33,"column":18},"action":"insert","lines":[" "],"id":320}],[{"start":{"row":38,"column":12},"end":{"row":38,"column":16},"action":"remove","lines":[" "],"id":321},{"start":{"row":38,"column":8},"end":{"row":38,"column":12},"action":"remove","lines":[" "]},{"start":{"row":38,"column":4},"end":{"row":38,"column":8},"action":"remove","lines":[" "]},{"start":{"row":38,"column":0},"end":{"row":38,"column":4},"action":"remove","lines":[" 
"]},{"start":{"row":37,"column":17},"end":{"row":38,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":37,"column":17},"end":{"row":37,"column":18},"action":"insert","lines":[" "],"id":322}],[{"start":{"row":40,"column":8},"end":{"row":40,"column":12},"action":"remove","lines":[" "],"id":323},{"start":{"row":40,"column":4},"end":{"row":40,"column":8},"action":"remove","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "]},{"start":{"row":39,"column":31},"end":{"row":40,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":40,"column":8},"end":{"row":40,"column":12},"action":"remove","lines":[" "],"id":324},{"start":{"row":40,"column":4},"end":{"row":40,"column":8},"action":"remove","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "]},{"start":{"row":39,"column":41},"end":{"row":40,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":40,"column":8},"end":{"row":40,"column":12},"action":"remove","lines":[" "],"id":325},{"start":{"row":40,"column":4},"end":{"row":40,"column":8},"action":"remove","lines":[" "]},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "]},{"start":{"row":39,"column":61},"end":{"row":40,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":40,"column":4},"end":{"row":40,"column":8},"action":"remove","lines":[" "],"id":326},{"start":{"row":40,"column":0},"end":{"row":40,"column":4},"action":"remove","lines":[" "]},{"start":{"row":39,"column":93},"end":{"row":40,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":39,"column":4},"end":{"row":39,"column":8},"action":"remove","lines":[" "],"id":327},{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"remove","lines":[" 
"]},{"start":{"row":38,"column":9},"end":{"row":39,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":38,"column":9},"end":{"row":38,"column":10},"action":"insert","lines":[" "],"id":328}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":9,"column":84},"end":{"row":9,"column":84},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1530237096308,"hash":"b0721232c24bbbee29589b479d74a927c9ed1335"}
| 50,247
| 50,247
| 0.546162
| 7,291
| 50,247
| 3.76135
| 0.046359
| 0.14965
| 0.14841
| 0.107935
| 0.823366
| 0.776765
| 0.730564
| 0.714228
| 0.501641
| 0.491358
| 0
| 0.087708
| 0.008637
| 50,247
| 1
| 50,247
| 50,247
| 0.462831
| 0
| 0
| 0
| 0
| 0
| 0.469133
| 0.005194
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
bc8679921ae8cca54f687f4c0619b666cab34b53
| 48,513
|
py
|
Python
|
katpoint/test/test_projection.py
|
ska-sa/katpoint
|
7cbac9c2f461e4209a147bda93572b7f523531d4
|
[
"BSD-3-Clause"
] | 1
|
2019-08-26T06:26:47.000Z
|
2019-08-26T06:26:47.000Z
|
katpoint/test/test_projection.py
|
ska-sa/katpoint
|
7cbac9c2f461e4209a147bda93572b7f523531d4
|
[
"BSD-3-Clause"
] | 23
|
2018-11-20T15:41:40.000Z
|
2021-08-03T20:39:21.000Z
|
katpoint/test/test_projection.py
|
ska-sa/katpoint
|
7cbac9c2f461e4209a147bda93572b7f523531d4
|
[
"BSD-3-Clause"
] | 4
|
2019-07-22T08:01:03.000Z
|
2021-02-23T07:09:04.000Z
|
################################################################################
# Copyright (c) 2009-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests for the projection module."""
from __future__ import print_function, division, absolute_import
import threading
import unittest
import numpy as np
import katpoint
from katpoint.projection import (OutOfRangeError, out_of_range_context, treat_out_of_range_values,
set_out_of_range_treatment, get_out_of_range_treatment)
# Optional dependency: the AIPS reference implementation (newpos/dircos)
# is only needed for the cross-check tests against the original AIPS
# routines; when it is absent those tests are skipped via `found_aips`.
try:
    from .aips_projection import newpos, dircos
    found_aips = True
except ImportError:
    found_aips = False
def skip(reason=''):
    """Use nose to skip a test.

    If nose is not installed, this silently does nothing, so the caller
    should still `return` immediately after invoking it.
    """
    try:
        import nose
    except ImportError:
        return
    raise nose.SkipTest(reason)
def assert_angles_almost_equal(x, y, decimal):
    """Check that angle arrays `x` and `y` agree to `decimal` places, mod 2*pi.

    Also verifies that the two inputs have matching shapes and matching NaN
    patterns before comparing the wrapped angle differences.
    """
    def wrap(angle):
        # Map angle onto the primary branch around zero
        return angle - np.round(angle / (2.0 * np.pi)) * 2.0 * np.pi
    x = np.asarray(x)
    y = np.asarray(y)
    # Multiplying by zero preserves NaNs, exposing shape/NaN mismatches
    np.testing.assert_array_equal(0 * x, 0 * y,
                                  'Array shapes and/or NaN patterns differ')
    difference = wrap(np.nan_to_num(x - y))
    np.testing.assert_almost_equal(difference, np.zeros(np.shape(x)),
                                   decimal=decimal)
class TestOutOfRangeTreatment(unittest.TestCase):
    """Test treatment of out-of-range values."""

    def setUp(self):
        # Remember global treatment so tearDown can restore it
        self._old_treatment = get_out_of_range_treatment()

    def test_treatment_setup(self):
        # Each valid treatment round-trips through the setter/getter
        for treatment in ('raise', 'nan', 'clip'):
            set_out_of_range_treatment(treatment)
            self.assertEqual(get_out_of_range_treatment(), treatment)
        with self.assertRaises(ValueError):
            set_out_of_range_treatment('bad treatment')
        # The context manager overrides the global setting temporarily...
        with out_of_range_context('raise'):
            self.assertEqual(get_out_of_range_treatment(), 'raise')
        # ...and the last global setting ('clip') is restored afterwards
        self.assertEqual(get_out_of_range_treatment(), 'clip')

    def test_out_of_range_handling_array(self):
        values = [1, 2, 3, 4]
        result = treat_out_of_range_values(values, 'Should not happen',
                                           lower=0, upper=5)
        np.testing.assert_array_equal(result, values)
        with out_of_range_context('raise'):
            with self.assertRaises(OutOfRangeError):
                treat_out_of_range_values(values, 'Out of range', lower=2.1)
        with out_of_range_context('nan'):
            result = treat_out_of_range_values(values, 'Out of range', lower=2.1)
            np.testing.assert_array_equal(result, [np.nan, np.nan, 3.0, 4.0])
        with out_of_range_context('clip'):
            result = treat_out_of_range_values(values, 'Out of range', upper=1.1)
            np.testing.assert_array_equal(result, [1.0, 1.1, 1.1, 1.1])

    def test_out_of_range_handling_scalar(self):
        value = 2
        result = treat_out_of_range_values(value, 'Should not happen',
                                           lower=0, upper=5)
        np.testing.assert_array_equal(result, value)
        with out_of_range_context('raise'):
            with self.assertRaises(OutOfRangeError):
                treat_out_of_range_values(value, 'Out of range', lower=2.1)
        with out_of_range_context('nan'):
            result = treat_out_of_range_values(value, 'Out of range', lower=2.1)
            np.testing.assert_array_equal(result, np.nan)
        with out_of_range_context('clip'):
            result = treat_out_of_range_values(value, 'Out of range', upper=1.1)
            np.testing.assert_array_equal(result, 1.1)

    def test_scalar_vs_0d(self):
        # Scalar in -> scalar out, 0-d array in -> 0-d array out
        with out_of_range_context('clip'):
            scalar = treat_out_of_range_values(2.0, 'Out of range', upper=1.1)
            assert np.isscalar(scalar)
            zero_dim = treat_out_of_range_values(np.array(2.0), 'Out of range',
                                                 upper=1.1)
            assert not np.isscalar(zero_dim)

    def test_clipping_of_minor_outliers(self):
        # A value that exceeds the bound by one ulp is clipped, never flagged
        barely_too_big = 1.0 + np.finfo(float).eps
        for treatment in ('raise', 'nan', 'clip'):
            with out_of_range_context(treatment):
                clipped = treat_out_of_range_values(
                    barely_too_big, 'Should not trigger false alarm', upper=1.0)
                assert clipped == 1.0

    def test_threading(self):
        def worker():
            try:
                outcome.append(treat_out_of_range_values(2.0, 'Should raise',
                                                         upper=1.0))
            except Exception as exc:
                outcome.append(exc)
        outcome = []
        worker_thread = threading.Thread(target=worker)
        with out_of_range_context('nan'):
            # Make sure the thread code runs inside our out_of_range_context
            worker_thread.start()
            worker_thread.join()
        # The thread keeps the default 'raise' treatment, not our 'nan' one
        assert isinstance(outcome[0], OutOfRangeError)

    def tearDown(self):
        set_out_of_range_treatment(self._old_treatment)
class TestProjectionSIN(unittest.TestCase):
    """Test orthographic projection."""

    def setUp(self):
        rng = np.random.RandomState(42)
        self.plane_to_sphere = katpoint.plane_to_sphere['SIN']
        self.sphere_to_plane = katpoint.sphere_to_plane['SIN']
        num_samples = 100
        max_theta = np.pi / 2.0
        # NOTE: draw order from `rng` matters for reproducibility
        self.az0 = np.pi * (2.0 * rng.rand(num_samples) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (rng.rand(num_samples) - 0.5)
        # (x, y) points within unit circle
        theta = max_theta * rng.rand(num_samples)
        phi = 2 * np.pi * rng.rand(num_samples)
        self.x = np.sin(theta) * np.cos(phi)
        self.y = np.sin(theta) * np.sin(phi)

    def test_random_closure(self):
        """SIN projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        restored_az, restored_el = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=10)
        np.testing.assert_almost_equal(self.y, yy, decimal=10)
        assert_angles_almost_equal(az, restored_az, decimal=10)
        assert_angles_almost_equal(el, restored_el, decimal=10)

    def test_aips_compatibility(self):
        """SIN projection: compare with original AIPS routine."""
        if not found_aips:
            skip("AIPS projection module not found")
            return
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        az_aips = np.zeros(az.shape)
        el_aips = np.zeros(el.shape)
        x_aips = np.zeros(xx.shape)
        y_aips = np.zeros(yy.shape)
        # AIPS routines are scalar-only, hence the per-point loop
        for n in range(len(az)):
            az_aips[n], el_aips[n], ierr = newpos(
                2, self.az0[n], self.el0[n], self.x[n], self.y[n])
            x_aips[n], y_aips[n], ierr = dircos(
                2, self.az0[n], self.el0[n], az[n], el[n])
            self.assertEqual(ierr, 0)
        assert_angles_almost_equal(az, az_aips, decimal=9)
        assert_angles_almost_equal(el, el_aips, decimal=9)
        np.testing.assert_almost_equal(xx, x_aips, decimal=9)
        np.testing.assert_almost_equal(yy, y_aips, decimal=9)

    def test_corner_cases_sphere_to_plane(self):
        """SIN projection: test special corner cases (sphere->plane)."""
        cases = [
            # Origin
            ((0.0, 0.0, 0.0, 0.0), [0.0, 0.0]),
            # Points 90 degrees from reference point on sphere
            ((0.0, 0.0, np.pi / 2.0, 0.0), [1.0, 0.0]),
            ((0.0, 0.0, -np.pi / 2.0, 0.0), [-1.0, 0.0]),
            ((0.0, 0.0, 0.0, np.pi / 2.0), [0.0, 1.0]),
            ((0.0, 0.0, 0.0, -np.pi / 2.0), [0.0, -1.0]),
            # Reference point at pole on sphere
            ((0.0, np.pi / 2.0, 0.0, 0.0), [0.0, -1.0]),
            ((0.0, np.pi / 2.0, np.pi, 1e-8), [0.0, 1.0]),
            ((0.0, np.pi / 2.0, np.pi / 2.0, 0.0), [1.0, 0.0]),
            ((0.0, np.pi / 2.0, -np.pi / 2.0, 0.0), [-1.0, 0.0]),
        ]
        for args, expected in cases:
            xy = np.array(self.sphere_to_plane(*args))
            np.testing.assert_almost_equal(xy, expected, decimal=12)

    def test_corner_cases_plane_to_sphere(self):
        """SIN projection: test special corner cases (plane->sphere)."""
        cases = [
            # Origin
            ((0.0, 0.0, 0.0, 0.0), [0.0, 0.0]),
            # Points on unit circle in plane
            ((0.0, 0.0, 1.0, 0.0), [np.pi / 2.0, 0.0]),
            ((0.0, 0.0, -1.0, 0.0), [-np.pi / 2.0, 0.0]),
            ((0.0, 0.0, 0.0, 1.0), [0.0, np.pi / 2.0]),
            ((0.0, 0.0, 0.0, -1.0), [0.0, -np.pi / 2.0]),
            # Reference point at pole on sphere
            ((0.0, -np.pi / 2.0, 1.0, 0.0), [np.pi / 2.0, 0.0]),
            ((0.0, -np.pi / 2.0, -1.0, 0.0), [-np.pi / 2.0, 0.0]),
            ((0.0, -np.pi / 2.0, 0.0, 1.0), [0.0, 0.0]),
            ((0.0, -np.pi / 2.0, 0.0, -1.0), [np.pi, 0.0]),
        ]
        for args, expected in cases:
            ae = np.array(self.plane_to_sphere(*args))
            assert_angles_almost_equal(ae, expected, decimal=12)

    def test_out_of_range_cases_sphere_to_plane(self):
        """SIN projection: test out-of-range cases (sphere->plane)."""
        # Points outside allowed domain on sphere
        bad_inputs = [
            (0.0, np.pi, 0.0, 0.0),
            (0.0, 0.0, np.pi, 0.0),
            (0.0, 0.0, 0.0, np.pi),
        ]
        with out_of_range_context('raise'):
            for args in bad_inputs:
                self.assertRaises(OutOfRangeError, self.sphere_to_plane, *args)
        with out_of_range_context('nan'):
            for args in bad_inputs:
                xy = np.array(self.sphere_to_plane(*args))
                np.testing.assert_array_equal(xy, [np.nan, np.nan])
        with out_of_range_context('clip'):
            clipped = [[0.0, -1.0], [-1.0, 0.0], [0.0, 1.0]]
            for args, expected in zip(bad_inputs, clipped):
                xy = np.array(self.sphere_to_plane(*args))
                np.testing.assert_almost_equal(xy, expected, decimal=12)

    def test_out_of_range_cases_plane_to_sphere(self):
        """SIN projection: test out-of-range cases (plane->sphere)."""
        # Points outside allowed domain in plane
        bad_inputs = [
            (0.0, np.pi, 0.0, 0.0),
            (0.0, 0.0, 2.0, 0.0),
            (0.0, 0.0, 0.0, 2.0),
        ]
        with out_of_range_context('raise'):
            for args in bad_inputs:
                self.assertRaises(OutOfRangeError, self.plane_to_sphere, *args)
        with out_of_range_context('nan'):
            for args in bad_inputs:
                ae = np.array(self.plane_to_sphere(*args))
                np.testing.assert_array_equal(ae, [np.nan, np.nan])
        with out_of_range_context('clip'):
            clipped = [[0.0, np.pi / 2.0], [np.pi / 2.0, 0.0], [0.0, np.pi / 2.0]]
            for args, expected in zip(bad_inputs, clipped):
                ae = np.array(self.plane_to_sphere(*args))
                assert_angles_almost_equal(ae, expected, decimal=12)
class TestProjectionTAN(unittest.TestCase):
    """Test gnomonic (TAN) projection."""

    def setUp(self):
        """Generate a fixed random set of reference points and plane offsets."""
        rs = np.random.RandomState(42)
        self.plane_to_sphere = katpoint.plane_to_sphere['TAN']
        self.sphere_to_plane = katpoint.sphere_to_plane['TAN']
        N = 100
        # Stay away from edge of hemisphere
        max_theta = np.pi / 2.0 - 0.01
        self.az0 = np.pi * (2.0 * rs.rand(N) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (rs.rand(N) - 0.5)
        theta = max_theta * rs.rand(N)
        phi = 2 * np.pi * rs.rand(N)
        # Perform inverse TAN mapping to spread out points on plane
        self.x = np.tan(theta) * np.cos(phi)
        self.y = np.tan(theta) * np.sin(phi)

    def test_random_closure(self):
        """TAN projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=8)
        np.testing.assert_almost_equal(self.y, yy, decimal=8)
        assert_angles_almost_equal(az, aa, decimal=8)
        assert_angles_almost_equal(el, ee, decimal=8)

    def test_aips_compatibility(self):
        """TAN projection: compare with original AIPS routine."""
        if not found_aips:
            skip("AIPS projection module not found")
            return
        # AIPS TAN only deprojects (x, y) coordinates within unit circle
        r = self.x * self.x + self.y * self.y
        az0, el0 = self.az0[r <= 1.0], self.el0[r <= 1.0]
        x, y = self.x[r <= 1.0], self.y[r <= 1.0]
        az, el = self.plane_to_sphere(az0, el0, x, y)
        xx, yy = self.sphere_to_plane(az0, el0, az, el)
        az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
        x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
        for n in range(len(az)):
            az_aips[n], el_aips[n], ierr = newpos(
                3, az0[n], el0[n], x[n], y[n])
            # BUG FIX: this status was previously overwritten by the DIRCOS
            # call below before being checked - verify both status codes.
            self.assertEqual(ierr, 0)
            x_aips[n], y_aips[n], ierr = dircos(
                3, az0[n], el0[n], az[n], el[n])
            self.assertEqual(ierr, 0)
        assert_angles_almost_equal(az, az_aips, decimal=10)
        assert_angles_almost_equal(el, el_aips, decimal=10)
        np.testing.assert_almost_equal(xx, x_aips, decimal=10)
        np.testing.assert_almost_equal(yy, y_aips, decimal=10)

    def test_corner_cases_sphere_to_plane(self):
        """TAN projection: test special corner cases (sphere->plane)."""
        # Origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 45 degrees from reference point on sphere (tan(45) = 1)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 4.0, 0.0))
        np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 4.0, 0.0))
        np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)

    def test_corner_cases_plane_to_sphere(self):
        """TAN projection: test special corner cases (plane->sphere)."""
        # Origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on unit circle in plane (deproject to 45 degrees)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 4.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 4.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, np.pi / 4.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 4.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, -np.pi / 4.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, -np.pi / 4.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 4.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [np.pi, -np.pi / 4.0], decimal=12)

    def test_out_of_range_cases_sphere_to_plane(self):
        """TAN projection: test out-of-range cases (sphere->plane)."""
        # Points outside allowed domain on sphere
        with out_of_range_context('raise'):
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, np.pi, 0.0, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        with out_of_range_context('nan'):
            xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
            xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
            xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
        # Clipping pushes points towards the edge of the hemisphere, where
        # the tangent plane coordinates blow up (hence the 1e6 magnitudes)
        with out_of_range_context('clip'):
            xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_almost_equal(xy, [0.0, -1e6], decimal=4)
            xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
            np.testing.assert_almost_equal(xy, [-1e6, 0.0], decimal=4)
            xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
            np.testing.assert_almost_equal(xy, [0.0, 1e6], decimal=4)

    def test_out_of_range_cases_plane_to_sphere(self):
        """TAN projection: test out-of-range cases (plane->sphere)."""
        # Points outside allowed domain in plane (only el0 can be invalid,
        # since TAN accepts the entire plane)
        with out_of_range_context('raise'):
            self.assertRaises(OutOfRangeError,
                              self.plane_to_sphere, 0.0, np.pi, 0.0, 0.0)
        with out_of_range_context('nan'):
            ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
        with out_of_range_context('clip'):
            ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
            assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
class TestProjectionARC(unittest.TestCase):
    """Test zenithal equidistant (ARC) projection."""

    def setUp(self):
        """Generate a fixed random set of reference points and plane offsets."""
        rs = np.random.RandomState(42)
        self.plane_to_sphere = katpoint.plane_to_sphere['ARC']
        self.sphere_to_plane = katpoint.sphere_to_plane['ARC']
        N = 100
        # Stay away from edge of circle
        max_theta = np.pi - 0.01
        self.az0 = np.pi * (2.0 * rs.rand(N) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (rs.rand(N) - 0.5)
        # (x, y) points within circle of radius pi
        theta = max_theta * rs.rand(N)
        phi = 2 * np.pi * rs.rand(N)
        self.x = theta * np.cos(phi)
        self.y = theta * np.sin(phi)

    def test_random_closure(self):
        """ARC projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=8)
        np.testing.assert_almost_equal(self.y, yy, decimal=8)
        assert_angles_almost_equal(az, aa, decimal=8)
        assert_angles_almost_equal(el, ee, decimal=8)

    def test_aips_compatibility(self):
        """ARC projection: compare with original AIPS routine."""
        if not found_aips:
            skip("AIPS projection module not found")
            return
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
        x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
        for n in range(len(az)):
            az_aips[n], el_aips[n], ierr = newpos(
                4, self.az0[n], self.el0[n], self.x[n], self.y[n])
            # BUG FIX: this status was previously overwritten by the DIRCOS
            # call below before being checked - verify both status codes.
            self.assertEqual(ierr, 0)
            x_aips[n], y_aips[n], ierr = dircos(
                4, self.az0[n], self.el0[n], az[n], el[n])
            self.assertEqual(ierr, 0)
        assert_angles_almost_equal(az, az_aips, decimal=8)
        assert_angles_almost_equal(el, el_aips, decimal=8)
        np.testing.assert_almost_equal(xx, x_aips, decimal=8)
        np.testing.assert_almost_equal(yy, y_aips, decimal=8)

    def test_corner_cases_sphere_to_plane(self):
        """ARC projection: test special corner cases (sphere->plane)."""
        # Origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 90 degrees from reference point on sphere
        # (ARC preserves angular distance, so plane radius equals pi/2)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [np.pi / 2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-np.pi / 2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, np.pi / 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, -np.pi / 2.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, -np.pi / 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, np.pi / 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [np.pi / 2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-np.pi / 2.0, 0.0], decimal=12)
        # Point diametrically opposite the reference point on sphere
        # (direction in plane is ambiguous, so only the magnitude is checked)
        xy = np.array(self.sphere_to_plane(np.pi, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(np.abs(xy), [np.pi, 0.0], decimal=12)

    def test_corner_cases_plane_to_sphere(self):
        """ARC projection: test special corner cases (plane->sphere)."""
        # Origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on unit circle in plane (one radian from reference point)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [1.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [-1.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, 1.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, -1.0], decimal=12)
        # Points on circle with radius pi in plane (antipode of reference)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, np.pi, 0.0))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -np.pi, 0.0))
        assert_angles_almost_equal(ae, [-np.pi, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, np.pi))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -np.pi))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, np.pi / 2.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -np.pi / 2.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, np.pi / 2.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -np.pi / 2.0))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)

    def test_out_of_range_cases_sphere_to_plane(self):
        """ARC projection: test out-of-range cases (sphere->plane)."""
        # Points outside allowed domain on sphere (note: azimuth of pi is
        # valid for ARC, so only bad elevations are tested here)
        with out_of_range_context('raise'):
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, np.pi, 0.0, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        with out_of_range_context('nan'):
            xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
            xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
        with out_of_range_context('clip'):
            xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_almost_equal(xy, [0.0, -np.pi / 2.0], decimal=12)
            xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
            np.testing.assert_almost_equal(xy, [0.0, np.pi / 2.0], decimal=12)

    def test_out_of_range_cases_plane_to_sphere(self):
        """ARC projection: test out-of-range cases (plane->sphere)."""
        # Points outside allowed domain in plane (radius 4 > pi)
        with out_of_range_context('raise'):
            self.assertRaises(OutOfRangeError,
                              self.plane_to_sphere, 0.0, np.pi, 0.0, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.plane_to_sphere, 0.0, 0.0, 4.0, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.plane_to_sphere, 0.0, 0.0, 0.0, 4.0)
        with out_of_range_context('nan'):
            ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
            ae = np.array(self.plane_to_sphere(0.0, 0.0, 4.0, 0.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
            ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 4.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
        with out_of_range_context('clip'):
            ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
            assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
            ae = np.array(self.plane_to_sphere(0.0, 0.0, 4.0, 0.0))
            assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
            ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 4.0))
            assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
class TestProjectionSTG(unittest.TestCase):
    """Test stereographic (STG) projection."""

    def setUp(self):
        """Generate a fixed random set of reference points and plane offsets."""
        rs = np.random.RandomState(42)
        self.plane_to_sphere = katpoint.plane_to_sphere['STG']
        self.sphere_to_plane = katpoint.sphere_to_plane['STG']
        N = 100
        # Stay well away from point of projection
        max_theta = 0.8 * np.pi
        self.az0 = np.pi * (2.0 * rs.rand(N) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (rs.rand(N) - 0.5)
        # Perform inverse STG mapping to spread out points on plane
        theta = max_theta * rs.rand(N)
        r = 2.0 * np.sin(theta) / (1.0 + np.cos(theta))
        phi = 2 * np.pi * rs.rand(N)
        self.x = r * np.cos(phi)
        self.y = r * np.sin(phi)

    def test_random_closure(self):
        """STG projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=9)
        np.testing.assert_almost_equal(self.y, yy, decimal=9)
        assert_angles_almost_equal(az, aa, decimal=9)
        assert_angles_almost_equal(el, ee, decimal=9)

    def test_aips_compatibility(self):
        """STG projection: compare with original AIPS routine."""
        if not found_aips:
            skip("AIPS projection module not found")
            return
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
        x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
        for n in range(len(az)):
            az_aips[n], el_aips[n], ierr = newpos(
                6, self.az0[n], self.el0[n], self.x[n], self.y[n])
            # BUG FIX: this status was previously overwritten by the DIRCOS
            # call below before being checked - verify both status codes.
            self.assertEqual(ierr, 0)
            x_aips[n], y_aips[n], ierr = dircos(
                6, self.az0[n], self.el0[n], az[n], el[n])
            self.assertEqual(ierr, 0)
        # AIPS NEWPOS STG has poor accuracy on azimuth angle (large closure errors by itself)
        # assert_angles_almost_equal(az, az_aips, decimal=9)
        assert_angles_almost_equal(el, el_aips, decimal=9)
        np.testing.assert_almost_equal(xx, x_aips, decimal=9)
        np.testing.assert_almost_equal(yy, y_aips, decimal=9)

    def test_corner_cases_sphere_to_plane(self):
        """STG projection: test special corner cases (sphere->plane)."""
        # Origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 90 degrees from reference point on sphere
        # (stereographic radius is 2 tan(theta / 2) = 2 at 90 degrees)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, -2.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, -2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-2.0, 0.0], decimal=12)

    def test_corner_cases_plane_to_sphere(self):
        """STG projection: test special corner cases (plane->sphere)."""
        # Origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on circle of radius 2.0 in plane
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 2.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -2.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 2.0))
        assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -2.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 2.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -2.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, 2.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -2.0))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)

    def test_out_of_range_cases_sphere_to_plane(self):
        """STG projection: test out-of-range cases (sphere->plane)."""
        # Points outside allowed domain on sphere
        with out_of_range_context('raise'):
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, np.pi, 0.0, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        with out_of_range_context('nan'):
            xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
            xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
            xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
        # Clipping near the antipodal projection point produces a very large
        # but finite plane radius (the -894.42... value below)
        with out_of_range_context('clip'):
            xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_almost_equal(xy, [0.0, -2.0], decimal=12)
            xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
            np.testing.assert_almost_equal(xy, [-894.42495493, 0.0], decimal=8)
            xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
            np.testing.assert_almost_equal(xy, [0.0, 2.0], decimal=12)

    def test_out_of_range_cases_plane_to_sphere(self):
        """STG projection: test out-of-range cases (plane->sphere)."""
        # Points outside allowed domain in plane (only el0 can be invalid,
        # since STG accepts the entire plane)
        with out_of_range_context('raise'):
            self.assertRaises(OutOfRangeError,
                              self.plane_to_sphere, 0.0, np.pi, 0.0, 0.0)
        with out_of_range_context('nan'):
            ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
        with out_of_range_context('clip'):
            ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
            assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
class TestProjectionCAR(unittest.TestCase):
    """Test plate carree projection."""

    def setUp(self):
        """Draw unrestricted spherical and planar test points (fixed seed)."""
        rs = np.random.RandomState(42)
        self.plane_to_sphere = katpoint.plane_to_sphere['CAR']
        self.sphere_to_plane = katpoint.sphere_to_plane['CAR']
        num_points = 100
        # CAR places no restriction on its inputs, so sample the full
        # azimuth range [-pi, pi) and elevation range [-pi/2, pi/2)
        # for both the sphere reference points and the plane offsets.
        self.az0 = np.pi * (2.0 * rs.rand(num_points) - 1.0)
        self.el0 = np.pi * (rs.rand(num_points) - 0.5)
        self.x = np.pi * (2.0 * rs.rand(num_points) - 1.0)
        self.y = np.pi * (rs.rand(num_points) - 0.5)

    def test_random_closure(self):
        """CAR projection: do random projections and check closure."""
        # Round-trip plane -> sphere -> plane -> sphere; CAR is a trivial
        # coordinate shift, hence the tight 1e-12 closure tolerance.
        sphere_az, sphere_el = self.plane_to_sphere(
            self.az0, self.el0, self.x, self.y)
        plane_x, plane_y = self.sphere_to_plane(
            self.az0, self.el0, sphere_az, sphere_el)
        again_az, again_el = self.plane_to_sphere(
            self.az0, self.el0, plane_x, plane_y)
        np.testing.assert_almost_equal(self.x, plane_x, decimal=12)
        np.testing.assert_almost_equal(self.y, plane_y, decimal=12)
        assert_angles_almost_equal(sphere_az, again_az, decimal=12)
        assert_angles_almost_equal(sphere_el, again_el, decimal=12)
def sphere_to_plane_original_ssn(target_az, target_el, scan_az, scan_el):
    """Mattieu's original version of SSN projection."""
    # Precompute shared terms of the direction-cosine formulas
    delta_az = target_az - scan_az
    cos_target_el = np.cos(target_el)
    ll = cos_target_el * np.sin(delta_az)
    mm = (cos_target_el * np.sin(scan_el) * np.cos(delta_az)
          - np.cos(scan_el) * np.sin(target_el))
    return ll, mm
def plane_to_sphere_original_ssn(target_az, target_el, ll, mm):
    """Mattieu's original version of SSN projection."""
    cos_target_el = np.cos(target_el)
    sin_target_el = np.sin(target_el)
    # Clip arcsin arguments to [-1, 1] to guard against round-off overshoot
    scan_az = target_az - np.arcsin(np.clip(ll / cos_target_el, -1.0, 1.0))
    numerator = (np.sqrt(1.0 - ll ** 2 - mm ** 2) * sin_target_el
                 + np.sqrt(cos_target_el ** 2 - ll ** 2) * mm)
    scan_el = np.arcsin(np.clip(numerator / (1.0 - ll ** 2), -1.0, 1.0))
    return scan_az, scan_el
class TestProjectionSSN(unittest.TestCase):
    """Test swapped orthographic projection.

    Note the sign convention exercised by the corner cases below: SSN (x, y)
    coordinates are mirrored relative to SIN, so increasing azimuth maps to
    negative x and increasing elevation maps to negative y.
    """
    def setUp(self):
        """Generate random reference points and (x, y) offsets in SSN domain."""
        rs = np.random.RandomState(42)
        self.plane_to_sphere = katpoint.plane_to_sphere['SSN']
        self.sphere_to_plane = katpoint.sphere_to_plane['SSN']
        N = 100
        self.az0 = np.pi * (2.0 * rs.rand(N) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (rs.rand(N) - 0.5)
        # (x, y) points within complicated SSN domain - clipped unit circle
        cos_el0 = np.cos(self.el0)
        # The x coordinate is bounded by +- cos(el0)
        self.x = (2 * rs.rand(N) - 1) * cos_el0
        # The y coordinate ranges between two (semi-)circles centred on origin:
        # the unit circle on one side and circle of radius cos(el0) on other side
        y_offset = -np.sqrt(cos_el0 ** 2 - self.x ** 2)
        y_range = -y_offset + np.sqrt(1.0 - self.x ** 2)
        self.y = (y_range * rs.rand(N) + y_offset) * np.sign(self.el0)
    def test_random_closure(self):
        """SSN projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=10)
        np.testing.assert_almost_equal(self.y, yy, decimal=10)
        assert_angles_almost_equal(az, aa, decimal=10)
        assert_angles_almost_equal(el, ee, decimal=10)
    def test_vs_original_ssn(self):
        """SSN projection: compare against Mattieu's original version."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        ll, mm = sphere_to_plane_original_ssn(self.az0, self.el0, az, el)
        aa, ee = plane_to_sphere_original_ssn(self.az0, self.el0, ll, mm)
        np.testing.assert_almost_equal(self.x, ll, decimal=10)
        # The y coordinate is the negated mm of the original formulation
        np.testing.assert_almost_equal(self.y, -mm, decimal=10)
        assert_angles_almost_equal(az, aa, decimal=10)
        assert_angles_almost_equal(el, ee, decimal=10)
    def test_corner_cases_sphere_to_plane(self):
        """SSN projection: test special corner cases (sphere->plane)."""
        # Origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 90 degrees from reference point on sphere
        # (signs are mirrored relative to the SIN projection)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 1e-8))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
    def test_corner_cases_plane_to_sphere(self):
        """SSN projection: test special corner cases (plane->sphere)."""
        # Origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on unit circle in plane (mirrored signs, see class docstring)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, np.pi / 2.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Test valid (x, y) domain
        ae = np.array(self.plane_to_sphere(0.0, 1.0, 0.0, -np.cos(1.0)))
        assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -1.0, 0.0, np.cos(1.0)))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
    def test_out_of_range_cases_sphere_to_plane(self):
        """SSN projection: test out-of-range cases (sphere->plane)."""
        # Points outside allowed domain on sphere
        with out_of_range_context('raise'):
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, np.pi, 0.0, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        with out_of_range_context('nan'):
            xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
            xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
            xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
            np.testing.assert_array_equal(xy, [np.nan, np.nan])
        with out_of_range_context('clip'):
            xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
            xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
            np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
            xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
            np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
    def test_out_of_range_cases_plane_to_sphere(self):
        """SSN projection: test out-of-range cases (plane->sphere)."""
        # Points outside allowed domain in plane
        with out_of_range_context('raise'):
            self.assertRaises(OutOfRangeError,
                              self.plane_to_sphere, 0.0, np.pi, 0.0, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.plane_to_sphere, 0.0, 0.0, 2.0, 0.0)
            self.assertRaises(OutOfRangeError,
                              self.plane_to_sphere, 0.0, 0.0, 0.0, 2.0)
        with out_of_range_context('nan'):
            # Bad el0 > 90 degrees
            ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
            # Bad (x, y) vector length > 1.0
            ae = np.array(self.plane_to_sphere(0.0, 0.0, 2.0, 0.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
            ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 2.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
            # Bad x coordinate > cos(el0)
            ae = np.array(self.plane_to_sphere(0.0, np.pi / 2.0, 1.0, 0.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
            ae = np.array(self.plane_to_sphere(0.0, np.pi / 2.0, -1.0, 0.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
            # Bad y coordinate -> den < 0
            ae = np.array(self.plane_to_sphere(0.0, np.pi / 2.0, 0.0, -1.0))
            np.testing.assert_array_equal(ae, [np.nan, np.nan])
        with out_of_range_context('clip'):
            ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
            assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
            ae = np.array(self.plane_to_sphere(0.0, 0.0, 2.0, 0.0))
            assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
            ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 2.0))
            assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
| 53.487321
| 98
| 0.602869
| 8,232
| 48,513
| 3.374757
| 0.038751
| 0.067456
| 0.057773
| 0.041323
| 0.884561
| 0.870847
| 0.854037
| 0.835823
| 0.817033
| 0.801483
| 0
| 0.064583
| 0.243564
| 48,513
| 906
| 99
| 53.546358
| 0.692454
| 0.109723
| 0
| 0.732493
| 0
| 0
| 0.015186
| 0
| 0
| 0
| 0
| 0
| 0.340336
| 1
| 0.071429
| false
| 0.001401
| 0.014006
| 0.001401
| 0.105042
| 0.001401
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bcf2115d8e1f602ed7bead3e5da7ba0420c2e8bd
| 210
|
py
|
Python
|
src/datasets/__init__.py
|
likojack/bnv_fusion
|
76b7354c6f3bf8c7f7e1ff4d958de0e73ec3e614
|
[
"MIT"
] | null | null | null |
src/datasets/__init__.py
|
likojack/bnv_fusion
|
76b7354c6f3bf8c7f7e1ff4d958de0e73ec3e614
|
[
"MIT"
] | null | null | null |
src/datasets/__init__.py
|
likojack/bnv_fusion
|
76b7354c6f3bf8c7f7e1ff4d958de0e73ec3e614
|
[
"MIT"
] | null | null | null |
from .datasets import register, get_dataset
from .fusion_dataset import FusionRefinerDataset
from .fusion_dataset import FusionRefinerScanNetDataset
from .fusion_inference_dataset import FusionInferenceDataset
| 42
| 60
| 0.895238
| 22
| 210
| 8.318182
| 0.5
| 0.163934
| 0.185792
| 0.251366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080952
| 210
| 4
| 61
| 52.5
| 0.948187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4c14ca0858f9ae268fff7b88b2c385660701946c
| 19
|
py
|
Python
|
JuneLong19/xorprop.py
|
mayank-kumar-giri/Competitive-Coding
|
4cd26ede051bad15bf25cfd037317c313b607507
|
[
"MIT"
] | null | null | null |
JuneLong19/xorprop.py
|
mayank-kumar-giri/Competitive-Coding
|
4cd26ede051bad15bf25cfd037317c313b607507
|
[
"MIT"
] | null | null | null |
JuneLong19/xorprop.py
|
mayank-kumar-giri/Competitive-Coding
|
4cd26ede051bad15bf25cfd037317c313b607507
|
[
"MIT"
] | null | null | null |
print((189748^7)^7)
| 19
| 19
| 0.684211
| 4
| 19
| 3.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.421053
| 0
| 19
| 1
| 19
| 19
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
4c6e73223c1a6be522a33cae46307743ec7f7804
| 7,127
|
py
|
Python
|
resource/production_tests/wrapper/obfuscate/goalkeeper.py
|
qmao/webds_api
|
ca5bf9ad3c1304be223b7e47c57ee10fb40d92e1
|
[
"BSD-3-Clause"
] | null | null | null |
resource/production_tests/wrapper/obfuscate/goalkeeper.py
|
qmao/webds_api
|
ca5bf9ad3c1304be223b7e47c57ee10fb40d92e1
|
[
"BSD-3-Clause"
] | null | null | null |
resource/production_tests/wrapper/obfuscate/goalkeeper.py
|
qmao/webds_api
|
ca5bf9ad3c1304be223b7e47c57ee10fb40d92e1
|
[
"BSD-3-Clause"
] | null | null | null |
# Bootstrap loader for PyArmor-obfuscated code: extend sys.path so the
# bundled `pytransform` native runtime can be imported, then initialize it
# before the __pyarmor__ payload below is executed.
print('Start to run bootstrap code')
import sys
# Absolute install location of the pytransform runtime on the target device.
sys.path.append("/usr/local/syna/lib/python/production_tests/wrapper/obfuscate/pytransform")
from pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x07\x00\x42\x0d\x0d\x0a\x09\x30\xe0\x02\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x7a\x06\x00\x00\x00\x00\x00\x08\x1e\x81\x8e\x10\xd1\x52\x94\x8c\x3f\x6e\x7e\x0c\x38\x52\x8c\xc7\x00\x00\x00\x00\x00\x00\x00\x00\xb0\x4e\xd4\x1c\x5b\xbb\xeb\xf5\x83\x7f\xb8\x86\xcc\x20\xf7\x6b\xde\x86\x87\x3b\x22\xe6\xbf\xe1\x8c\x98\x92\xf0\xd6\xa7\x41\xd8\xb2\x74\x91\xed\xe8\x1a\x44\x39\xc7\x43\x6f\x1b\xf5\x80\x0f\xfe\x8f\x0c\x1c\x43\xc2\xd8\xa2\x4f\x67\x68\x7d\x8c\x79\x83\xa8\x21\xd5\x15\x91\xd7\x8a\x97\x49\xb0\xf7\xfc\xbd\xed\xd8\xde\xd8\x90\x90\xac\xbb\xe2\x0d\xf6\x59\xba\xd4\x80\x40\x3c\x46\x0b\x32\x56\x8d\xdd\xa9\x66\xff\xcf\x71\x29\x48\x33\x17\x28\xcc\xa8\xca\x71\x83\x0e\xf2\xd0\x5b\x6a\xc6\x1a\x08\x3a\xf2\xbc\x77\x76\x8b\x9b\x92\x73\xa3\xd3\x67\x6d\xc2\x75\x16\xcf\x28\x8d\xdd\x22\x84\x8e\x01\xfd\x03\x00\x2d\x97\xb2\xbc\x59\xb5\xac\x15\x4c\xec\x84\x1d\x6d\x29\x84\xc6\xee\xb2\xc6\xa0\xe9\xb5\x73\xde\x5c\x16\x14\x4b\x9c\x29\xed\xa1\xde\xdc\xbf\xde\xb6\x5d\x8e\xd1\x32\x23\xc0\x98\xab\x8e\x6f\x76\x05\xc0\x97\x07\x8f\x13\x9d\x9d\xfc\x5a\xf0\x5c\x45\xfb\x17\xa8\x09\x51\x11\xea\x7a\x72\x2e\x45\x69\x63\x18\x9e\x47\xbb\x49\x2c\x31\x39\x0b\x89\x09\x37\xee\x66\x6d\xad\xb6\x2d\x24\xb3\x3f\xff\x78\xb0\x41\x41\x51\xee\x12\x9d\x9e\x31\x38\x47\xe1\x8c\x74\xfd\x27\x27\xba\xeb\x13\x3c\x2f\xe8\x7f\xf4\xf6\x65\x53\x86\xea\x1d\x4f\x76\xa9\x80\x5f\x7e\xd3\xa6\xd4\xfd\xee\x97\x94\x77\xdc\x87\xf0\x52\xf8\xed\x2c\x2f\x99\x43\x20\x45\x42\x66\x76\x7c\xd9\xcc\x27\xd6\x91\x20\x71\x3f\xbc\x7f\xb2\x42\x76\xe9\x8d\x15\xbc\xfa\x19\x3d\x37\x1b\xe2\x74\x20\xe3\x0f\xb4\x5a\x1a\x5f\xa1\x4d\xe8\x2e\x3b\x1c\x1e\x2b\x07\x83\x4b\x50\xb3\xe4\x0f\xb7\x39\x5b\x96\xa7\x4f\xf0\x55\xa2\x4a\xc8\x51\xda\x0c\x0f\x75\xc6\xca\x36\x49\xad\x60\xc2\x12\x4d\xd3\x85\x9e\x8b\xec\x9e\x80\xc6\x43\xec\xcb\x13\x63\xcc\x80\x60\xb7\x3a\x74\x00\xbc\x1b\xdc\x48\x60\x6b\x16\xbf\xd0\x13\xb7\x7b\xf7\xca\x9f\xcc\xe8\xf1\xab\x43\xe3\x78\xbe\xae\xc9\x4a\xd9\xf9\x6a\x87\x
87\xeb\x93\x24\x69\xb2\xde\x77\xc0\x82\xdc\x91\xc9\x67\x98\x50\x6c\x5d\xe6\xe5\x22\x73\xa0\x49\x7b\x6d\x77\xf1\xbb\xfa\xa3\xb2\xae\xc3\x31\x60\x81\xb7\x2b\x39\xc4\x74\x03\x5d\x14\x4b\xca\xa0\x99\xf4\x56\xfa\xf8\x48\xe1\x03\x04\x6f\x79\x61\xc3\x65\x72\x04\x6b\x61\x5b\x25\x6d\xa8\x67\x8b\x8c\x67\xf8\xb3\x96\x7f\x87\xac\x73\x54\xc2\x58\xa9\xff\x9e\xd2\x70\xcc\x27\xa0\xc7\xb1\x95\x32\x41\x7b\x52\x02\xd2\xda\x85\xae\x62\x6a\x48\xe5\x0d\xee\x7c\x66\x71\xd1\xa8\x1c\xfb\xef\x62\xa0\xed\xb9\xa5\x70\x45\x3f\xba\xd9\x0e\x8b\xcf\xa4\xec\xf0\x97\xfe\x3d\x4c\xca\x9e\x95\x78\xbb\xfd\xdc\x72\x1e\x17\x96\xa5\xe8\xd3\x55\x53\xf6\x00\xb1\xac\xc3\x30\x51\x43\xe3\x77\xfc\xd4\x3a\xc9\xb8\x5b\xc7\x3a\x0a\x31\xbb\x34\xd1\xee\xe7\xf0\xd4\xf4\x00\x78\x58\xa1\x67\x50\x43\x90\x4a\x51\x6b\x75\xbd\xd4\x74\xaf\xba\x1f\xc3\xe5\xb7\x80\x8c\xaf\x25\x62\xa6\xe3\x23\x50\x60\x2a\xc7\xe6\x52\x9e\xe6\xa9\xb7\x4f\xc0\x4c\xa5\x45\xfe\x17\xe8\x8f\xea\x69\x02\xf5\x23\xa9\xc2\x3f\x26\xf0\xba\xaf\x97\x2a\x17\xe3\xc7\x83\x54\x3d\xea\xad\xe9\xa8\x5e\x10\x12\x3d\x56\xf0\x57\x76\x40\xd2\xe5\xc3\x81\x4d\x6f\x0c\xea\x5d\x15\x93\x94\x47\xce\x1f\x5f\xd1\xf9\xf2\x04\x20\x9b\x40\x7e\x9f\x9b\xe2\x9f\x89\x60\x92\x85\x8f\x53\x3b\x2a\x71\x20\xb8\xdf\x77\x10\xcf\x37\x8a\x69\x06\x01\xc9\xe6\x59\xde\x57\x84\xe9\x91\x64\xe1\xc5\x43\x2b\x79\xe6\x85\xb8\x66\x7a\xaa\x08\x0a\xbf\xe2\x49\xf5\x9b\x64\x75\xfa\xc8\xae\x0b\x1e\xfb\x0a\x6d\x68\x94\x5e\x09\x9e\x12\xfb\x3c\xe1\xa8\x91\xb3\xf8\x0c\x70\x78\xc7\xa3\x1d\xd1\xeb\x7f\x0f\xcf\xc3\xc5\x7c\x9c\x2f\x8d\x79\xe2\x41\x51\x58\x25\xa7\x24\xbe\xd6\x7c\x33\x98\x8d\xb7\x54\xeb\xbb\x0b\x27\x13\x79\x40\xe0\x8b\xb9\xe7\x72\xbc\x8c\x63\x12\x34\x71\x54\x34\xcc\xec\x36\x63\x15\xb9\xb6\x55\x25\x67\xc4\xff\xee\xff\x5f\xc1\xa1\x97\xc1\xe4\xea\x23\xb9\xb2\x2f\xc5\xa5\xf3\xa3\xac\x2d\xd8\x50\x60\x81\x33\x56\xd4\x5e\x08\xc1\xdf\x49\xcc\xea\x4f\x2d\xcb\xfa\x3e\xce\x84\x94\xbf\x0c\xfb\xf1\x9f\x86\x10\xf4\x54\xcb\x1d\x97\x80\x85\xd9\x16\x1b\x47\x1b\xcd\x2c\x30\x1a\xc9\xc2\x0e\x3f\x1b\xf2\x76\x5b\x79\xfe\x
92\x92\x65\x5b\x1f\xf7\x7c\xd1\x26\x3b\xd7\x45\x32\xcc\x1a\x31\x93\x1a\xd0\xe0\x14\x50\x4e\x6b\xe0\x47\x38\xcc\xb1\x00\x21\x6e\xfe\xcb\x6f\x99\x85\xae\x26\xcc\x7f\xca\x3f\x6b\x8c\x2d\x9c\x75\x79\xc1\xe0\x7c\xaa\x9d\x1a\x47\x45\x07\x07\xfe\x71\xf8\xdc\x83\x7e\x79\x32\xdc\xcc\xf0\x52\xe4\x38\xff\xb3\xa3\xdb\x9f\xf5\x8c\x78\xe1\x77\xdd\x8e\xc9\x4f\x3b\x70\xb5\x0c\x1a\xd7\xba\xca\x7d\x53\xd6\x5a\x0d\xc3\x21\xd4\xd2\xec\xca\xcd\xc1\xa9\x46\x50\xa4\xb4\x49\xfd\xd8\xdf\x6f\x25\xd0\xfb\x83\x50\xfc\x3e\x75\x96\x7c\x56\xaf\xae\x88\x14\xbb\x7c\x4a\x99\xf6\x44\x40\x34\x5f\x3c\x45\xea\x2c\xcb\x99\xa6\xa5\x18\xd1\xbf\xd9\x40\x5f\x2f\x0a\x8b\x1c\xd4\x5c\xa0\xeb\x95\xc5\xda\xfd\x10\xee\xf0\x7a\x8b\xb5\xe4\x94\xff\xe2\x29\x57\xfe\x48\x73\x22\x8d\x91\x17\x16\x88\xf2\xa8\x7a\x5c\x20\x2f\xb1\xdc\xc8\xae\x8d\x63\x39\xda\xc3\xe2\xc1\xaa\xa6\xf5\x29\x97\x08\x1f\x77\x24\x88\x4d\xa2\xf0\x51\x05\x62\x24\x43\x6b\xa7\x8d\x2d\xc7\x3b\xed\xdb\x39\xf5\x2d\xe9\xb7\x65\xcd\x9b\x2e\x6f\xc5\x3e\x2c\xc5\x17\x22\x4d\x6e\x8f\xe3\x2d\x37\xf3\x2d\xb8\x4c\x0d\x9b\xd6\xcc\x4b\xbf\x9e\x4f\x22\xb5\xdc\xf3\x55\xe8\x17\x09\x14\x10\x5c\xa2\xe8\x4c\x77\xad\xc4\x75\xe2\x4a\x46\x4c\x1d\xd4\x95\xbb\x5e\x3b\x34\xa0\x2e\xd0\x63\x31\x81\x4a\x73\xdb\x78\x03\x25\x2b\x96\x2b\x6e\xe7\x0c\x79\x61\xba\x0d\xb4\x24\x72\x27\xc6\xa0\x0b\x2b\x9a\x5d\x0b\xf9\xe8\x2b\x01\xb7\x42\x67\x3d\xc2\x7f\xb4\x89\x1f\x33\xe6\xe1\xc7\x5f\xd2\xfd\xa0\xe0\xe3\x9e\xd8\x2f\x16\x25\xd5\x54\x29\xa7\xc5\xf0\x40\x95\x8b\x5d\xd2\xaf\xf7\x00\xe8\x8f\xd6\x48\x4b\x62\x06\x21\xfc\x66\x10\xb6\xb8\x7e\x17\x46\x86\x86\x19\xd5\xf3\xf4\x31\xd2\x74\x5f\x1b\xdb\xdf\x56\x58\x35\x62\xca\xbc\xf1\xd4\x21\xf0\x5b\xa4\x87\xd1\x37\xd6\x52\xa8\xed\x2b\xe2\x01\x76\xd6\xb8\x2a\x48\xb4\x0f\x75\x38\x1f\x2d\x96\xbb\xfd\x26\x04\x98\x8d\xd8\xcc\x62\x97\xba\xcd\x05\xcf\x56\x64\xbd\xf2\x4f\x03\x39\xdf\x87\xa0\x23\xfa\x0b\x4b\xef\x97\xbd\x00\xb6\xd3\x76\x31\x48\x39\xf4\x08\x69\x0e\x35\x0b\x8b\xd8\x55\x1f\x7b\xef\x12\xe5\xf1\x1a\xaa\x4e\x72\x56\x3e\xcb\x51\x90\x96\xa8\x09\x67\xe0\x
99\xdb\x38\x93\x8d\xab\xea\x29\xda\x90\x3b\x2c\xec\x3d\xc7\xef\xd8\x2b\x2d\x72\x7d\x14\xd5\x70\x56\x9f\x16\x41\xbf\x7f\xfb\x88\x74\x97\xb4\x6f\x1c\xe3\xe2\x83\x41\x28\x38\xb3\x91\xe8\x8e\x8b\x20\xc8\x02\x04\x30\xa9\xa7\x7a\x24\xea\x5d\x81\xa6\x30\x47\x72\xef\x37\x8b\x2a\x44\x5a\x62\x7b\xc1\x2b\xa7\xa1\x64\xb0\x86\xed\x08\x6a\x88\xf8\x51\xae\x51\xe0\x63\xbc\x0a\xf7\xe2\xca\xaa\x04\xa8\x88\xe9\xd8\xed\x47\xff\x3d\xc9\x8b\x9d\xec\x57\x67\x3e\x3e\x43\xcf\xa6\x53\x9b\x04\x99\xca\xf0\x4f\xa7\x39\xd6\x63\x7a\x23\xe5\xec\x82\xc9\x5c\xde\xb1\x5e\x88\x21\xd6\xb4\x48\x78\xd6\x1c\x4a\x03\xc0\x87\xbc\x77\x8e\xb0\xd1\xc3\x44\xf1\x3c\xeb\x1c\x78\xd4\xe9\xda\x0c\xd4\xa9\x77\x58\x9d\x46\x29\x58\x99\xb7\x1c\x39\x5d\xde\xc0\x96\x19\x8f\x1b\xb6\xf7\xac\xfe\x82\xfc\x71\x17\x1b\xdb\x27\x96\xb0\x63\xd5\x6a\xa7\x51\x71\x84\x66\xd0\xe5\x15\x98\x8f\x6f\x4a\xee\x72\x3f\xf2\x51\x4e\x74\x82\x81\x93\x35\x2b\xf1\x08\xee\x23\x74\x8d\x63\xd5', 2)
| 1,018.142857
| 6,927
| 0.75207
| 1,755
| 7,127
| 3.045584
| 0.161254
| 0.021328
| 0.02189
| 0.017961
| 0.007297
| 0.00449
| 0.00449
| 0
| 0
| 0
| 0
| 0.301688
| 0.002385
| 7,127
| 7
| 6,927
| 1,018.142857
| 0.45007
| 0
| 0
| 0
| 0
| 0.166667
| 0.980359
| 0.976571
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
4c78ca7bae6c2268518e8713646d75d07621d41b
| 8,151
|
py
|
Python
|
fhirclient/r4models/imagingstudy_tests.py
|
cspears-mitre/CapStatement
|
2390566ed75d420e0615e3a0aacb77e8c030fdcc
|
[
"Apache-2.0"
] | 1
|
2021-12-24T11:14:38.000Z
|
2021-12-24T11:14:38.000Z
|
fhirclient/r4models/imagingstudy_tests.py
|
cspears-mitre/CapStatement
|
2390566ed75d420e0615e3a0aacb77e8c030fdcc
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/r4models/imagingstudy_tests.py
|
cspears-mitre/CapStatement
|
2390566ed75d420e0615e3a0aacb77e8c030fdcc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 on 2018-12-20.
# 2018, SMART Health IT.
import os
import io
import unittest
import json
from . import imagingstudy
from .fhirdate import FHIRDate
class ImagingStudyTests(unittest.TestCase):
    """Round-trip tests for the FHIR R4 ImagingStudy model.

    Each test loads an example resource from JSON, checks its field values,
    serializes it back with as_json(), re-parses it, and checks again.
    """

    def instantiate_from(self, filename):
        """Load *filename* from FHIR_UNITTEST_DATADIR (or the CWD) and
        return it parsed into an ImagingStudy instance."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("ImagingStudy", js["resourceType"])
        return imagingstudy.ImagingStudy(js)

    def testImagingStudy1(self):
        """Round-trip imagingstudy-example-xr.json through the model."""
        inst = self.instantiate_from("imagingstudy-example-xr.json")
        self.assertIsNotNone(inst, "Must have instantiated a ImagingStudy instance")
        self.implImagingStudy1(inst)
        # Serialize and re-parse; the copy must satisfy the same assertions.
        js = inst.as_json()
        self.assertEqual("ImagingStudy", js["resourceType"])
        inst2 = imagingstudy.ImagingStudy(js)
        self.implImagingStudy1(inst2)

    def implImagingStudy1(self, inst):
        """Assert the expected field values for the XR wrist example."""
        self.assertEqual(inst.id, "example-xr")
        self.assertEqual(inst.identifier[0].system, "urn:dicom:uid")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "urn:oid:2.16.124.113543.6003.1154777499.30246.19789.3503430046")
        self.assertEqual(inst.identifier[1].type.coding[0].code, "ACSN")
        self.assertEqual(inst.identifier[1].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/v2-0203")
        self.assertEqual(inst.identifier[1].use, "usual")
        self.assertEqual(inst.identifier[1].value, "W12342398")
        self.assertEqual(inst.identifier[2].use, "secondary")
        self.assertEqual(inst.identifier[2].value, "55551234")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.modality[0].code, "DX")
        self.assertEqual(inst.modality[0].system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.note[0].text, "XR Wrist 3+ Views")
        self.assertEqual(inst.numberOfInstances, 2)
        self.assertEqual(inst.numberOfSeries, 1)
        self.assertEqual(inst.procedureCode[0].coding[0].code, "RPID2589")
        self.assertEqual(inst.procedureCode[0].coding[0].display, "XR Wrist 3+ Views")
        self.assertEqual(inst.procedureCode[0].coding[0].system, "http://www.radlex.org")
        self.assertEqual(inst.procedureCode[0].text, "XR Wrist 3+ Views")
        self.assertEqual(inst.reasonCode[0].coding[0].code, "357009")
        self.assertEqual(inst.reasonCode[0].coding[0].display, "Closed fracture of trapezoidal bone of wrist")
        self.assertEqual(inst.reasonCode[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.series[0].bodySite.code, "T-15460")
        self.assertEqual(inst.series[0].bodySite.display, "Wrist Joint")
        self.assertEqual(inst.series[0].bodySite.system, "http://snomed.info/sct")
        self.assertEqual(inst.series[0].description, "XR Wrist 3+ Views")
        self.assertEqual(inst.series[0].instance[0].number, 1)
        self.assertEqual(inst.series[0].instance[0].sopClass.code, "urn:oid:1.2.840.10008.5.1.4.1.1.2")
        self.assertEqual(inst.series[0].instance[0].sopClass.system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.series[0].instance[0].title, "PA VIEW")
        self.assertEqual(inst.series[0].instance[0].uid, "2.16.124.113543.6003.1154777499.30246.19789.3503430045.1.1")
        self.assertEqual(inst.series[0].instance[1].number, 2)
        self.assertEqual(inst.series[0].instance[1].sopClass.code, "urn:oid:1.2.840.10008.5.1.4.1.1.2")
        self.assertEqual(inst.series[0].instance[1].sopClass.system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.series[0].instance[1].title, "LL VIEW")
        self.assertEqual(inst.series[0].instance[1].uid, "2.16.124.113543.6003.1154777499.30246.19789.3503430045.1.2")
        self.assertEqual(inst.series[0].laterality.code, "419161000")
        self.assertEqual(inst.series[0].laterality.display, "Unilateral left")
        self.assertEqual(inst.series[0].laterality.system, "http://snomed.info/sct")
        self.assertEqual(inst.series[0].modality.code, "DX")
        self.assertEqual(inst.series[0].modality.system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.series[0].number, 3)
        self.assertEqual(inst.series[0].numberOfInstances, 2)
        self.assertEqual(inst.series[0].performer[0].function.coding[0].code, "PRF")
        self.assertEqual(inst.series[0].performer[0].function.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ParticipationType")
        self.assertEqual(inst.series[0].started.date, FHIRDate("2011-01-01T11:01:20+03:00").date)
        self.assertEqual(inst.series[0].started.as_json(), "2011-01-01T11:01:20+03:00")
        self.assertEqual(inst.series[0].uid, "2.16.124.113543.6003.1154777499.30246.19789.3503430045.1")
        self.assertEqual(inst.started.date, FHIRDate("2017-01-01T11:01:20+03:00").date)
        self.assertEqual(inst.started.as_json(), "2017-01-01T11:01:20+03:00")
        self.assertEqual(inst.status, "available")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">XR Wrist 3+ Views. John Smith (MRN: 09236). Accession: W12342398. Performed: 2017-01-01. 1 series, 2 images.</div>")
        self.assertEqual(inst.text.status, "generated")

    def testImagingStudy2(self):
        """Round-trip imagingstudy-example.json through the model."""
        inst = self.instantiate_from("imagingstudy-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a ImagingStudy instance")
        self.implImagingStudy2(inst)
        # Serialize and re-parse; the copy must satisfy the same assertions.
        js = inst.as_json()
        self.assertEqual("ImagingStudy", js["resourceType"])
        inst2 = imagingstudy.ImagingStudy(js)
        self.implImagingStudy2(inst2)

    def implImagingStudy2(self, inst):
        """Assert the expected field values for the CT example."""
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.identifier[0].system, "urn:dicom:uid")
        self.assertEqual(inst.identifier[0].value, "urn:oid:2.16.124.113543.6003.1154777499.30246.19789.3503430045")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.numberOfInstances, 1)
        self.assertEqual(inst.numberOfSeries, 1)
        self.assertEqual(inst.series[0].bodySite.code, "67734004")
        self.assertEqual(inst.series[0].bodySite.display, "Upper Trunk Structure")
        self.assertEqual(inst.series[0].bodySite.system, "http://snomed.info/sct")
        self.assertEqual(inst.series[0].description, "CT Surview 180")
        self.assertEqual(inst.series[0].instance[0].number, 1)
        self.assertEqual(inst.series[0].instance[0].sopClass.code, "urn:oid:1.2.840.10008.5.1.4.1.1.2")
        self.assertEqual(inst.series[0].instance[0].sopClass.system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.series[0].instance[0].uid, "2.16.124.113543.6003.189642796.63084.16748.2599092903")
        self.assertEqual(inst.series[0].modality.code, "CT")
        self.assertEqual(inst.series[0].modality.system, "http://dicom.nema.org/resources/ontology/DCM")
        self.assertEqual(inst.series[0].number, 3)
        self.assertEqual(inst.series[0].numberOfInstances, 1)
        self.assertEqual(inst.series[0].uid, "2.16.124.113543.6003.2588828330.45298.17418.2723805630")
        self.assertEqual(inst.started.date, FHIRDate("2011-01-01T11:01:20+03:00").date)
        self.assertEqual(inst.started.as_json(), "2011-01-01T11:01:20+03:00")
        self.assertEqual(inst.status, "available")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">CT Chest. John Smith (MRN: 09236). Accession: W12342398. Performed: 2011-01-01. 3 series, 12 images.</div>")
        self.assertEqual(inst.text.status, "generated")
| 62.7
| 201
| 0.685928
| 1,092
| 8,151
| 5.110806
| 0.18315
| 0.228454
| 0.279161
| 0.1747
| 0.82745
| 0.776563
| 0.745386
| 0.611181
| 0.547393
| 0.525712
| 0
| 0.109012
| 0.149184
| 8,151
| 129
| 202
| 63.186047
| 0.695746
| 0.014354
| 0
| 0.309735
| 1
| 0.079646
| 0.249128
| 0.090433
| 0
| 0
| 0
| 0
| 0.769912
| 1
| 0.044248
| false
| 0
| 0.053097
| 0
| 0.115044
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d5cc46127ff5a6697c942492d307b6b294d3b6e9
| 96
|
py
|
Python
|
backend/core/models/__init__.py
|
jesusmaherrera/django-nuxtjs
|
f8d9500fb236c4cd938e9a6bbaf8063e545dd6fe
|
[
"MIT"
] | null | null | null |
backend/core/models/__init__.py
|
jesusmaherrera/django-nuxtjs
|
f8d9500fb236c4cd938e9a6bbaf8063e545dd6fe
|
[
"MIT"
] | null | null | null |
backend/core/models/__init__.py
|
jesusmaherrera/django-nuxtjs
|
f8d9500fb236c4cd938e9a6bbaf8063e545dd6fe
|
[
"MIT"
] | null | null | null |
from backend.core.models.choice import Choice
from backend.core.models.question import Question
| 32
| 49
| 0.854167
| 14
| 96
| 5.857143
| 0.5
| 0.268293
| 0.365854
| 0.512195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 96
| 2
| 50
| 48
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d5e76bed47499c7084a8acbe0e18ea083ffaa957
| 11,049
|
py
|
Python
|
views.py
|
jooonyliks/feeds
|
0d52a54baa0df51ae86e82d61063840124b8718c
|
[
"MIT"
] | null | null | null |
views.py
|
jooonyliks/feeds
|
0d52a54baa0df51ae86e82d61063840124b8718c
|
[
"MIT"
] | null | null | null |
views.py
|
jooonyliks/feeds
|
0d52a54baa0df51ae86e82d61063840124b8718c
|
[
"MIT"
] | 1
|
2019-06-19T05:13:43.000Z
|
2019-06-19T05:13:43.000Z
|
from django.shortcuts import render
from django.http import HttpResponse
from xml.dom.minidom import parse,Node
import xml.dom.minidom
import wget
import os
def home(request):
    """Render the site landing page."""
    return render(request, "main.html")
def bbc(request):
    """Download the BBC world-news RSS feed, parse its <item> entries and
    render them via bbc.html.

    Context maps each item title to [description, link].
    """
    url = "http://feeds.bbci.co.uk/news/world/rss.xml"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\bbc.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'bbc.html', context=context)
def washington(request):
    """Download the Washington Times national-news RSS feed, parse its
    <item> entries and render them via washing.html.

    Context maps each item title to [description, link].
    """
    url = "https://www.washingtontimes.com/rss/headlines/news/national/"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\washing.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'washing.html', context=context)
def reuters(request):
    """Download the Reuters world-news RSS feed, parse its <item> entries
    and render them via reuters.html.

    Context maps each item title to [description, link].
    """
    url = "http://feeds.reuters.com/Reuters/worldNews"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\reuters.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, "reuters.html", context=context)
def aljazeera(request):
    """Download the Al Jazeera RSS feed, parse its <item> entries and
    render them via aljazeera.html.

    Context maps each item title to [description, link].
    """
    url = "https://www.aljazeera.com/xml/rss/all.xml"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\aljazeera.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'aljazeera.html', context=context)
def cnn(request):
    """Download the CNN edition RSS feed, parse its <item> entries and
    render them via cnn.html.

    Context maps each item title to [description, link].
    """
    url = "http://rss.cnn.com/rss/edition.rss"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\cnn.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'cnn.html', context=context)
def dw(request):
    """Download the Deutsche Welle English RSS feed, parse its <item>
    entries and render them via dw.html.

    Context maps each item title to [description, link].
    """
    url = "http://rss.dw.com/rdf/rss-en-all"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\dw.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'dw.html', context=context)
def espn(request):
    """Download the ESPN news RSS feed, parse its <item> entries and
    render them via espn.html.

    Context maps each item title to [description, link].
    """
    url = "https://www.espn.com/espn/rss/news"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\espn.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'espn.html', context=context)
def greatgoals(request):
    """Download the Goal.com news RSS feed, parse its <item> entries and
    render them via great-goals.html.

    Context maps each item title to [description, link].
    """
    url = "https://www.goal.com/feeds/en/news"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\great-goals.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'great-goals.html', context=context)
def skysports(request):
    """Download a sports RSS feed, parse its <item> entries and render
    them via sky-sports.html.

    Context maps each item title to [description, link].
    """
    # NOTE(review): this URL is ESPN's feed, not a Sky Sports feed — looks
    # like a copy-paste slip from espn(); confirm the intended source.
    url = "https://www.espn.com/espn/rss/news"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\sky-sports.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'sky-sports.html', context=context)
def soccernews(request):
    """Download the SoccerNews RSS feed, parse its <item> entries and
    render them via soccernews.html.

    Context maps each item title to [description, link].
    """
    url = "http://www.soccernews.com/feed"
    default_path = "C:\\Users\\dell\\Desktop\\rssfeeds\\feeds\\xmls\\soccernews.xml"
    # Remove any stale copy before downloading a fresh one.
    if os.path.exists(default_path):
        os.remove(default_path)
    else:
        print("FileNotPresentError")
    # Use default_path instead of repeating the literal path (same value).
    wget.download(url, default_path)
    d = dict()
    # Fix: close the XML file deterministically. The original left the handle
    # open, leaking the descriptor and (on Windows) keeping the file locked.
    with open(default_path) as data_source:
        dom1 = parse(data_source)
        for items in dom1.documentElement.getElementsByTagName("item"):
            title = items.getElementsByTagName('title')[0]
            w = title.childNodes[0].data
            description = items.getElementsByTagName('description')[0]
            k = description.childNodes[0].data
            link = items.getElementsByTagName('link')[0]
            web_link = link.childNodes[0].data
            d[w] = [k, web_link]
    context = {
        "title": d.items()
    }
    return render(request, 'soccernews.html', context=context)
| 33.892638
| 89
| 0.618155
| 1,309
| 11,049
| 5.16272
| 0.073338
| 0.052086
| 0.066588
| 0.070435
| 0.872004
| 0.872004
| 0.872004
| 0.872004
| 0.872004
| 0.771086
| 0
| 0.009345
| 0.225179
| 11,049
| 326
| 90
| 33.892638
| 0.780049
| 0.016201
| 0
| 0.716418
| 0
| 0
| 0.245304
| 0.154328
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041045
| false
| 0
| 0.022388
| 0.003731
| 0.104478
| 0.037313
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5f32093345d25a85510cce99937f04148b2b41a
| 44,910
|
py
|
Python
|
test/unit/test_discovery_v2.py
|
knishika/watson-developer-cloud-python-sdk
|
83370b74e19ebdb851af212ef75f470551b59c94
|
[
"Apache-2.0"
] | 1
|
2020-08-14T16:07:23.000Z
|
2020-08-14T16:07:23.000Z
|
test/unit/test_discovery_v2.py
|
johnjdailey/python-sdk
|
83370b74e19ebdb851af212ef75f470551b59c94
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_discovery_v2.py
|
johnjdailey/python-sdk
|
83370b74e19ebdb851af212ef75f470551b59c94
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2019, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import responses
import tempfile
import ibm_watson.discovery_v2
from ibm_watson.discovery_v2 import *
base_url = 'https://fake'
##############################################################################
# Start of Service: Collections
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_collections
#-----------------------------------------------------------------------------
class TestListCollections():
    """Generated tests for DiscoveryV2.list_collections against a mocked
    HTTP endpoint (uses the `responses` library; helpers such as
    send_request / check_empty_required_params are defined elsewhere in
    this file)."""
    #--------------------------------------------------------
    # Test 1: Send fake data and check response
    #--------------------------------------------------------
    @responses.activate
    def test_list_collections_response(self):
        body = self.construct_full_body()
        response = fake_response_ListCollectionsResponse_json
        send_request(self, body, response)
        assert len(responses.calls) == 1
    #--------------------------------------------------------
    # Test 2: Send only required fake data and check response
    #--------------------------------------------------------
    @responses.activate
    def test_list_collections_required_response(self):
        # Check response with required params
        body = self.construct_required_body()
        response = fake_response_ListCollectionsResponse_json
        send_request(self, body, response)
        assert len(responses.calls) == 1
    #--------------------------------------------------------
    # Test 3: Send empty data and check response
    #--------------------------------------------------------
    @responses.activate
    def test_list_collections_empty(self):
        check_empty_required_params(self, fake_response_ListCollectionsResponse_json)
        check_missing_required_params(self)
        # No HTTP call should have been made for invalid/missing params.
        assert len(responses.calls) == 0
    #-----------
    #- Helpers -
    #-----------
    def make_url(self, body):
        """Build the mocked collections endpoint URL for this project."""
        endpoint = '/v2/projects/{0}/collections'.format(body['project_id'])
        url = '{0}{1}'.format(base_url, endpoint)
        return url
    def add_mock_response(self, url, response):
        """Register a 200 JSON GET response for *url* with `responses`."""
        responses.add(responses.GET,
                    url,
                    body=json.dumps(response),
                    status=200,
                    content_type='application/json')
    def call_service(self, body):
        """Call list_collections on a no-auth DiscoveryV2 client."""
        service = DiscoveryV2(
            authenticator=NoAuthAuthenticator(),
            version='2019-11-22',
            )
        service.set_service_url(base_url)
        output = service.list_collections(**body)
        return output
    def construct_full_body(self):
        """Body with all parameters populated."""
        body = dict()
        body['project_id'] = "string1"
        return body
    def construct_required_body(self):
        """Body with only the required parameters."""
        body = dict()
        body['project_id'] = "string1"
        return body
# endregion
##############################################################################
# End of Service: Collections
##############################################################################
##############################################################################
# Start of Service: Queries
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for query
#-----------------------------------------------------------------------------
class TestQuery():
    """Generated tests for DiscoveryV2.query against a mocked HTTP
    endpoint (uses the `responses` library; helpers such as
    send_request / check_empty_required_params are defined elsewhere in
    this file)."""
    #--------------------------------------------------------
    # Test 1: Send fake data and check response
    #--------------------------------------------------------
    @responses.activate
    def test_query_response(self):
        body = self.construct_full_body()
        response = fake_response_QueryResponse_json
        send_request(self, body, response)
        assert len(responses.calls) == 1
    #--------------------------------------------------------
    # Test 2: Send only required fake data and check response
    #--------------------------------------------------------
    @responses.activate
    def test_query_required_response(self):
        # Check response with required params
        body = self.construct_required_body()
        response = fake_response_QueryResponse_json
        send_request(self, body, response)
        assert len(responses.calls) == 1
    #--------------------------------------------------------
    # Test 3: Send empty data and check response
    #--------------------------------------------------------
    @responses.activate
    def test_query_empty(self):
        check_empty_required_params(self, fake_response_QueryResponse_json)
        check_missing_required_params(self)
        # No HTTP call should have been made for invalid/missing params.
        assert len(responses.calls) == 0
    #-----------
    #- Helpers -
    #-----------
    def make_url(self, body):
        """Build the mocked query endpoint URL for this project."""
        endpoint = '/v2/projects/{0}/query'.format(body['project_id'])
        url = '{0}{1}'.format(base_url, endpoint)
        return url
    def add_mock_response(self, url, response):
        """Register a 200 JSON POST response for *url* with `responses`."""
        responses.add(responses.POST,
                    url,
                    body=json.dumps(response),
                    status=200,
                    content_type='application/json')
    def call_service(self, body):
        """Call query on a no-auth DiscoveryV2 client."""
        service = DiscoveryV2(
            authenticator=NoAuthAuthenticator(),
            version='2019-11-22',
            )
        service.set_service_url(base_url)
        output = service.query(**body)
        return output
    def construct_full_body(self):
        """Body with all query parameters populated, including nested models."""
        body = dict()
        body['project_id'] = "string1"
        body.update({"collection_ids": [], "filter": "string1", "query": "string1", "natural_language_query": "string1", "aggregation": "string1", "count": 12345, "return_": [], "offset": 12345, "sort": "string1", "highlight": True, "spelling_suggestions": True, "table_results": QueryLargeTableResults._from_dict(json.loads("""{"enabled": false, "count": 5}""")), "suggested_refinements": QueryLargeSuggestedRefinements._from_dict(json.loads("""{"enabled": false, "count": 5}""")), "passages": QueryLargePassages._from_dict(json.loads("""{"enabled": false, "per_document": true, "max_per_document": 16, "fields": [], "count": 5, "characters": 10}""")), })
        return body
    def construct_required_body(self):
        """Body with only the required parameters."""
        body = dict()
        body['project_id'] = "string1"
        return body
#-----------------------------------------------------------------------------
# Test Class for get_autocompletion
#-----------------------------------------------------------------------------
class TestGetAutocompletion():
    """Mock-based unit tests for ``DiscoveryV2.get_autocompletion``."""

    @responses.activate
    def test_get_autocompletion_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_Completions_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_autocompletion_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_Completions_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_autocompletion_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_Completions_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the autocompletion URL for the project id in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/autocompletion"

    def add_mock_response(self, url, response):
        """Register a 200 GET mock that returns *response* serialized as JSON."""
        responses.add(responses.GET, url,
                      body=json.dumps(response),
                      status=200,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.get_autocompletion`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.get_autocompletion(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``get_autocompletion``."""
        return {
            'project_id': "string1",
            'prefix': "string1",
            'collection_ids': [],
            'field': "string1",
            'count': 12345,
        }

    def construct_required_body(self):
        """Fake values for only the required parameters of ``get_autocompletion``."""
        return {'project_id': "string1", 'prefix': "string1"}
#-----------------------------------------------------------------------------
# Test Class for query_notices
#-----------------------------------------------------------------------------
class TestQueryNotices():
    """Mock-based unit tests for ``DiscoveryV2.query_notices``."""

    @responses.activate
    def test_query_notices_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_QueryNoticesResponse_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_query_notices_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_QueryNoticesResponse_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_query_notices_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_QueryNoticesResponse_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the notices URL for the project id in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/notices"

    def add_mock_response(self, url, response):
        """Register a 200 GET mock that returns *response* serialized as JSON."""
        responses.add(responses.GET, url,
                      body=json.dumps(response),
                      status=200,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.query_notices`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.query_notices(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``query_notices``."""
        return {
            'project_id': "string1",
            'filter': "string1",
            'query': "string1",
            'natural_language_query': "string1",
            'count': 12345,
            'offset': 12345,
        }

    def construct_required_body(self):
        """Fake values for only the required parameters of ``query_notices``."""
        return {'project_id': "string1"}
#-----------------------------------------------------------------------------
# Test Class for list_fields
#-----------------------------------------------------------------------------
class TestListFields():
    """Mock-based unit tests for ``DiscoveryV2.list_fields``."""

    @responses.activate
    def test_list_fields_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_ListFieldsResponse_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_fields_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_ListFieldsResponse_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_fields_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_ListFieldsResponse_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the fields URL for the project id in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/fields"

    def add_mock_response(self, url, response):
        """Register a 200 GET mock that returns *response* serialized as JSON."""
        responses.add(responses.GET, url,
                      body=json.dumps(response),
                      status=200,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.list_fields`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.list_fields(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``list_fields``."""
        return {'project_id': "string1", 'collection_ids': []}

    def construct_required_body(self):
        """Fake values for only the required parameters of ``list_fields``."""
        return {'project_id': "string1"}
# endregion
##############################################################################
# End of Service: Queries
##############################################################################
##############################################################################
# Start of Service: ComponentSettings
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for get_component_settings
#-----------------------------------------------------------------------------
class TestGetComponentSettings():
    """Mock-based unit tests for ``DiscoveryV2.get_component_settings``."""

    @responses.activate
    def test_get_component_settings_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_ComponentSettingsResponse_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_component_settings_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_ComponentSettingsResponse_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_component_settings_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_ComponentSettingsResponse_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the component-settings URL for the project id in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/component_settings"

    def add_mock_response(self, url, response):
        """Register a 200 GET mock that returns *response* serialized as JSON."""
        responses.add(responses.GET, url,
                      body=json.dumps(response),
                      status=200,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.get_component_settings`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.get_component_settings(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``get_component_settings``."""
        return {'project_id': "string1"}

    def construct_required_body(self):
        """Fake values for only the required parameters of ``get_component_settings``."""
        return {'project_id': "string1"}
# endregion
##############################################################################
# End of Service: ComponentSettings
##############################################################################
##############################################################################
# Start of Service: Documents
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for add_document
#-----------------------------------------------------------------------------
class TestAddDocument():
    """Mock-based unit tests for ``DiscoveryV2.add_document``."""

    @responses.activate
    def test_add_document_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_DocumentAccepted_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_add_document_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_DocumentAccepted_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_add_document_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_DocumentAccepted_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the add-document URL for the project/collection ids in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/collections/{body['collection_id']}/documents"

    def add_mock_response(self, url, response):
        """Register a 202 Accepted POST mock that returns *response* as JSON."""
        responses.add(responses.POST, url,
                      body=json.dumps(response),
                      status=202,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.add_document`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.add_document(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``add_document``."""
        return {
            'project_id': "string1",
            'collection_id': "string1",
            # A real (temporary) file object stands in for the uploaded document.
            'file': tempfile.NamedTemporaryFile(),
            'filename': "string1",
            'file_content_type': "string1",
            'metadata': "string1",
            'x_watson_discovery_force': True,
        }

    def construct_required_body(self):
        """Fake values for only the required parameters of ``add_document``."""
        return {'project_id': "string1", 'collection_id': "string1"}
#-----------------------------------------------------------------------------
# Test Class for update_document
#-----------------------------------------------------------------------------
class TestUpdateDocument():
    """Mock-based unit tests for ``DiscoveryV2.update_document``."""

    @responses.activate
    def test_update_document_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_DocumentAccepted_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_document_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_DocumentAccepted_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_document_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_DocumentAccepted_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the update-document URL for the ids in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/collections/{body['collection_id']}/documents/{body['document_id']}"

    def add_mock_response(self, url, response):
        """Register a 202 Accepted POST mock that returns *response* as JSON."""
        responses.add(responses.POST, url,
                      body=json.dumps(response),
                      status=202,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.update_document`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.update_document(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``update_document``."""
        return {
            'project_id': "string1",
            'collection_id': "string1",
            'document_id': "string1",
            # A real (temporary) file object stands in for the uploaded document.
            'file': tempfile.NamedTemporaryFile(),
            'filename': "string1",
            'file_content_type': "string1",
            'metadata': "string1",
            'x_watson_discovery_force': True,
        }

    def construct_required_body(self):
        """Fake values for only the required parameters of ``update_document``."""
        return {
            'project_id': "string1",
            'collection_id': "string1",
            'document_id': "string1",
        }
#-----------------------------------------------------------------------------
# Test Class for delete_document
#-----------------------------------------------------------------------------
class TestDeleteDocument():
    """Mock-based unit tests for ``DiscoveryV2.delete_document``."""

    @responses.activate
    def test_delete_document_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_DeleteDocumentResponse_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_document_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_DeleteDocumentResponse_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_document_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_DeleteDocumentResponse_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the delete-document URL for the ids in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/collections/{body['collection_id']}/documents/{body['document_id']}"

    def add_mock_response(self, url, response):
        """Register a 200 DELETE mock that returns *response* as JSON."""
        responses.add(responses.DELETE, url,
                      body=json.dumps(response),
                      status=200,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.delete_document`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.delete_document(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``delete_document``."""
        return {
            'project_id': "string1",
            'collection_id': "string1",
            'document_id': "string1",
            'x_watson_discovery_force': True,
        }

    def construct_required_body(self):
        """Fake values for only the required parameters of ``delete_document``."""
        return {
            'project_id': "string1",
            'collection_id': "string1",
            'document_id': "string1",
        }
# endregion
##############################################################################
# End of Service: Documents
##############################################################################
##############################################################################
# Start of Service: TrainingData
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_training_queries
#-----------------------------------------------------------------------------
class TestListTrainingQueries():
    """Mock-based unit tests for ``DiscoveryV2.list_training_queries``."""

    @responses.activate
    def test_list_training_queries_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_TrainingQuerySet_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_training_queries_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_TrainingQuerySet_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_training_queries_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_TrainingQuerySet_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the training-queries URL for the project id in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/training_data/queries"

    def add_mock_response(self, url, response):
        """Register a 200 GET mock that returns *response* serialized as JSON."""
        responses.add(responses.GET, url,
                      body=json.dumps(response),
                      status=200,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.list_training_queries`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.list_training_queries(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``list_training_queries``."""
        return {'project_id': "string1"}

    def construct_required_body(self):
        """Fake values for only the required parameters of ``list_training_queries``."""
        return {'project_id': "string1"}
#-----------------------------------------------------------------------------
# Test Class for delete_training_queries
#-----------------------------------------------------------------------------
class TestDeleteTrainingQueries():
    """Mock-based unit tests for ``DiscoveryV2.delete_training_queries``."""

    @responses.activate
    def test_delete_training_queries_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_training_queries_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_training_queries_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the training-queries URL for the project id in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/training_data/queries"

    def add_mock_response(self, url, response):
        """Register a 204 No Content DELETE mock (empty content type)."""
        responses.add(responses.DELETE, url,
                      body=json.dumps(response),
                      status=204,
                      content_type='')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.delete_training_queries`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.delete_training_queries(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``delete_training_queries``."""
        return {'project_id': "string1"}

    def construct_required_body(self):
        """Fake values for only the required parameters of ``delete_training_queries``."""
        return {'project_id': "string1"}
#-----------------------------------------------------------------------------
# Test Class for create_training_query
#-----------------------------------------------------------------------------
class TestCreateTrainingQuery():
    """Mock-based unit tests for ``DiscoveryV2.create_training_query``."""

    @responses.activate
    def test_create_training_query_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_TrainingQuery_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_training_query_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_TrainingQuery_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_training_query_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_TrainingQuery_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the training-queries URL for the project id in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/training_data/queries"

    def add_mock_response(self, url, response):
        """Register a 201 Created POST mock that returns *response* as JSON."""
        responses.add(responses.POST, url,
                      body=json.dumps(response),
                      status=201,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.create_training_query`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.create_training_query(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``create_training_query``."""
        return {
            'project_id': "string1",
            "natural_language_query": "string1",
            "examples": [],
            "filter": "string1",
        }

    def construct_required_body(self):
        """Fake values for the required parameters of ``create_training_query``."""
        return {
            'project_id': "string1",
            "natural_language_query": "string1",
            "examples": [],
            "filter": "string1",
        }
#-----------------------------------------------------------------------------
# Test Class for get_training_query
#-----------------------------------------------------------------------------
class TestGetTrainingQuery():
    """Mock-based unit tests for ``DiscoveryV2.get_training_query``."""

    @responses.activate
    def test_get_training_query_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_TrainingQuery_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_training_query_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_TrainingQuery_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_training_query_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_TrainingQuery_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the single-training-query URL for the ids in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/training_data/queries/{body['query_id']}"

    def add_mock_response(self, url, response):
        """Register a 200 GET mock that returns *response* serialized as JSON."""
        responses.add(responses.GET, url,
                      body=json.dumps(response),
                      status=200,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.get_training_query`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.get_training_query(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``get_training_query``."""
        return {'project_id': "string1", 'query_id': "string1"}

    def construct_required_body(self):
        """Fake values for only the required parameters of ``get_training_query``."""
        return {'project_id': "string1", 'query_id': "string1"}
#-----------------------------------------------------------------------------
# Test Class for update_training_query
#-----------------------------------------------------------------------------
class TestUpdateTrainingQuery():
    """Mock-based unit tests for ``DiscoveryV2.update_training_query``."""

    @responses.activate
    def test_update_training_query_response(self):
        """A request with the full parameter set hits the mock exactly once."""
        send_request(self, self.construct_full_body(), fake_response_TrainingQuery_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_training_query_required_response(self):
        """A request with only the required parameters still succeeds."""
        send_request(self, self.construct_required_body(), fake_response_TrainingQuery_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_training_query_empty(self):
        """Empty/missing required parameters fail before any HTTP call is made."""
        check_empty_required_params(self, fake_response_TrainingQuery_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ----- helpers -----

    def make_url(self, body):
        """Build the single-training-query URL for the ids in *body*."""
        return f"{base_url}/v2/projects/{body['project_id']}/training_data/queries/{body['query_id']}"

    def add_mock_response(self, url, response):
        """Register a 201 Created POST mock that returns *response* as JSON."""
        responses.add(responses.POST, url,
                      body=json.dumps(response),
                      status=201,
                      content_type='application/json')

    def call_service(self, body):
        """Invoke ``DiscoveryV2.update_training_query`` against the mocked endpoint."""
        svc = DiscoveryV2(version='2019-11-22', authenticator=NoAuthAuthenticator())
        svc.set_service_url(base_url)
        return svc.update_training_query(**body)

    def construct_full_body(self):
        """Fake values for every parameter of ``update_training_query``."""
        return {
            'project_id': "string1",
            'query_id': "string1",
            "natural_language_query": "string1",
            "examples": [],
            "filter": "string1",
        }

    def construct_required_body(self):
        """Fake values for the required parameters of ``update_training_query``."""
        return {
            'project_id': "string1",
            'query_id': "string1",
            "natural_language_query": "string1",
            "examples": [],
            "filter": "string1",
        }
# endregion
##############################################################################
# End of Service: TrainingData
##############################################################################
def check_empty_required_params(obj, response):
    """Assert that the operation raises ``ValueError`` when every required
    parameter is present but set to ``None``.

    Args:
        obj: The generated test class instance
        response: Mock response payload to register
    """
    # Keep all keys, null out all values.
    nulled_body = {key: None for key in obj.construct_full_body()}
    raised = False
    try:
        send_request(obj, nulled_body, response)
    except ValueError:
        raised = True
    assert raised
def check_missing_required_params(obj):
    """Assert that the operation raises ``TypeError`` when called with no
    arguments at all (required parameters missing entirely).

    Args:
        obj: The generated test class instance
    """
    # The URL is precomputed from a full body because an empty body
    # cannot be used to build it.
    url = obj.make_url(obj.construct_full_body())
    raised = False
    try:
        send_request(obj, {}, {}, url=url)
    except TypeError:
        raised = True
    assert raised
def check_empty_response(obj):
    """Send a request with an empty body and expect an empty response.

    Args:
        obj: The generated test class instance
    """
    url = obj.make_url(obj.construct_full_body())
    send_request(obj, {}, {}, url=url)
def send_request(obj, body, response, url=None):
    """Create a request, send it through the mocked transport, and assert its accuracy.

    Args:
        obj: The generated test class instance.
        body: Dict filled with fake data for calling the service.
        response: Mock response object registered with the `responses` library
            and compared against the parsed service result.
        url: Optional explicit endpoint; derived from `body` via obj.make_url
            when omitted.
    """
    if not url:
        url = obj.make_url(body)
    obj.add_mock_response(url, response)
    output = obj.call_service(body)
    # startswith: the recorded URL may carry query parameters appended by the SDK.
    assert responses.calls[0].request.url.startswith(url)
    assert output.get_result() == response
####################
## Mock Responses ##
####################
# Canned JSON reply bodies (as strings) registered with the `responses` mock
# for each generated test; parsed and compared inside send_request.
fake_response__json = None
fake_response_ListCollectionsResponse_json = """{"collections": []}"""
fake_response_QueryResponse_json = """{"matching_results": 16, "results": [], "aggregations": [], "retrieval_details": {"document_retrieval_strategy": "fake_document_retrieval_strategy"}, "suggested_query": "fake_suggested_query", "suggested_refinements": [], "table_results": []}"""
fake_response_Completions_json = """{"completions": []}"""
fake_response_QueryNoticesResponse_json = """{"matching_results": 16, "notices": []}"""
fake_response_ListFieldsResponse_json = """{"fields": []}"""
fake_response_ComponentSettingsResponse_json = """{"fields_shown": {"body": {"use_passage": false, "field": "fake_field"}, "title": {"field": "fake_field"}}, "autocomplete": true, "structured_search": false, "results_per_page": 16, "aggregations": []}"""
fake_response_DocumentAccepted_json = """{"document_id": "fake_document_id", "status": "fake_status"}"""
# NOTE(review): the names below are re-assigned with identical values (code
# generator emits one constant per operation); only the last binding is used.
fake_response_DocumentAccepted_json = """{"document_id": "fake_document_id", "status": "fake_status"}"""
fake_response_DeleteDocumentResponse_json = """{"document_id": "fake_document_id", "status": "fake_status"}"""
fake_response_TrainingQuerySet_json = """{"queries": []}"""
fake_response_TrainingQuery_json = """{"query_id": "fake_query_id", "natural_language_query": "fake_natural_language_query", "filter": "fake_filter", "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z", "examples": []}"""
fake_response_TrainingQuery_json = """{"query_id": "fake_query_id", "natural_language_query": "fake_natural_language_query", "filter": "fake_filter", "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z", "examples": []}"""
fake_response_TrainingQuery_json = """{"query_id": "fake_query_id", "natural_language_query": "fake_natural_language_query", "filter": "fake_filter", "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z", "examples": []}"""
| 38.15633
| 656
| 0.510621
| 4,027
| 44,910
| 5.467097
| 0.068041
| 0.03561
| 0.022892
| 0.038154
| 0.851245
| 0.845567
| 0.840798
| 0.835938
| 0.825809
| 0.812591
| 0
| 0.01518
| 0.210822
| 44,910
| 1,176
| 657
| 38.188776
| 0.606004
| 0.264485
| 0
| 0.802817
| 0
| 0.008451
| 0.132089
| 0.037349
| 0
| 0
| 0
| 0
| 0.064789
| 1
| 0.16338
| false
| 0.002817
| 0.012676
| 0
| 0.274648
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5fba53d0b0f204ebb8efec9eacf324cc15e2ead
| 163
|
py
|
Python
|
sources/cloudflare.py
|
drosoCode/StatusExporter
|
8dd099399eb8ba12ab3c39f23764ff3b99ba205e
|
[
"MIT"
] | null | null | null |
sources/cloudflare.py
|
drosoCode/StatusExporter
|
8dd099399eb8ba12ab3c39f23764ff3b99ba205e
|
[
"MIT"
] | null | null | null |
sources/cloudflare.py
|
drosoCode/StatusExporter
|
8dd099399eb8ba12ab3c39f23764ff3b99ba205e
|
[
"MIT"
] | null | null | null |
from sources.genericStatusPage import getMetrics as _getMetrics
def getMetrics():
    """Fetch Cloudflare status metrics from its public statuspage summary endpoint."""
    summary_url = "https://www.cloudflarestatus.com/api/v2/summary.json"
    return _getMetrics(summary_url)
| 27.166667
| 78
| 0.797546
| 19
| 163
| 6.736842
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006803
| 0.09816
| 163
| 5
| 79
| 32.6
| 0.863946
| 0
| 0
| 0
| 0
| 0
| 0.319018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
d5fe6d737a6f3479ca0ee19c5f09c3dabd439dcb
| 10,137
|
py
|
Python
|
unitorch/datasets/huggingface.py
|
fuliucansheng/UniTorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | 2
|
2022-02-05T08:52:00.000Z
|
2022-03-27T07:01:34.000Z
|
unitorch/datasets/huggingface.py
|
Lixin-Qian/unitorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | null | null | null |
unitorch/datasets/huggingface.py
|
Lixin-Qian/unitorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | 1
|
2022-03-27T07:01:13.000Z
|
2022-03-27T07:01:13.000Z
|
# Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
import os
import datasets
from itertools import cycle
from torch.utils import data
from datasets import load_dataset
from datasets import Dataset
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
class hf_datasets(data.Dataset):
    """
    A dataclass of huggingface datasets library
    https://github.com/huggingface/datasets
    """

    def __init__(self, dataset: Dataset):
        """A class based on huggingface datasets

        `dataset` is an instance of huggingface dataset
        """
        self.__dataset__ = dataset

    @staticmethod
    def _select_split(dataset_splits, split: Optional[str]):
        """Return `split` from a DatasetDict-like mapping, falling back to 'train'.

        Shared by the file-based loaders (from_csv / from_json / from_parquet).
        """
        if split not in dataset_splits:
            return dataset_splits.get("train")
        return dataset_splits.get(split)

    @classmethod
    def from_csv(
        cls,
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, List[str]]] = None,
        names: Optional[List[str]] = None,
        sep: Optional[str] = "\t",
        split: Optional[str] = None,
    ):
        """
        A classmethod of load csv/tsv/text files dataset
        Args:
            data_dir: defining the data_dir of the dataset configuration.
            data_files: path(s) to source data file(s).
            names: header names to data file(s).
            sep: separator for text file(s).
            split: which split of the data to load.
        Returns: return a dataset (None when data_files is absent or missing on disk).
        """
        if data_files is None:
            return
        if isinstance(data_files, str) and not os.path.exists(data_files):
            return
        __dataset__ = load_dataset(
            "csv",
            data_dir=data_dir,
            data_files=data_files,
            delimiter=sep,
            column_names=names,
            quoting=3,  # 3 == csv.QUOTE_NONE: treat quote characters literally
        )
        return cls(dataset=cls._select_split(__dataset__, split))

    @classmethod
    def from_json(
        cls,
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, List[str]]] = None,
        field: Optional[str] = None,
        split: Optional[str] = None,
    ):
        """
        A classmethod of load json files dataset
        Args:
            data_dir: defining the data_dir of the dataset configuration.
            data_files: path(s) to source data file(s).
            field: specify the field to load in json file.
            split: which split of the data to load.
        Returns: return a dataset (None when data_files is absent or missing on disk).
        """
        if data_files is None:
            return
        if isinstance(data_files, str) and not os.path.exists(data_files):
            return
        __dataset__ = load_dataset(
            "json",
            data_dir=data_dir,
            data_files=data_files,
            field=field,
        )
        return cls(dataset=cls._select_split(__dataset__, split))

    @classmethod
    def from_parquet(
        cls,
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, List[str]]] = None,
        split: Optional[str] = None,
    ):
        """
        A classmethod of load parquet files dataset
        Args:
            data_dir: defining the data_dir of the dataset configuration.
            data_files: path(s) to source data file(s).
            split: which split of the data to load.
        Returns: return a dataset (None when data_files is absent or missing on disk).
        """
        if data_files is None:
            return
        if isinstance(data_files, str) and not os.path.exists(data_files):
            return
        __dataset__ = load_dataset(
            "parquet",
            data_dir=data_dir,
            data_files=data_files,
        )
        return cls(dataset=cls._select_split(__dataset__, split))

    @classmethod
    def from_hub(
        cls,
        data_name,
        config_name: Optional[str] = None,
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, List[str]]] = None,
        split: Optional[str] = None,
    ):
        """
        A classmethod of load hf hub dataset
        Args:
            data_name: a dataset repository on the hf hub.
            config_name: defining the name of the dataset configuration.
            data_dir: defining the data_dir of the dataset configuration.
            data_files: path(s) to source data file(s).
            split: which split of the data to load.
        Returns: return a dataset.
        """
        __dataset__ = load_dataset(
            data_name,
            name=config_name,
            data_dir=data_dir,
            data_files=data_files,
        )
        # NOTE(review): unlike the file-based loaders there is no fallback to
        # "train" here; an unknown split keeps the whole DatasetDict. Confirm
        # this asymmetry is intentional.
        if split in __dataset__:
            __dataset__ = __dataset__.get(split)
        return cls(dataset=__dataset__)

    @property
    def dataset(self):
        """
        The property of actual hf dataset
        """
        return self.__dataset__

    def __getitem__(self, idx):
        return self.__dataset__[idx]

    def __len__(self):
        return len(self.__dataset__)
class hf_iterable_datasets(data.IterableDataset):
    """
    A dataclass of huggingface datasets library (streaming variant)
    https://github.com/huggingface/datasets
    """

    def __init__(
        self,
        dataset: Dataset,
    ):
        # `dataset` is a (streaming) huggingface dataset instance.
        self.__dataset__ = dataset

    def set_epoch(self, epoch):
        """Forward the epoch number to the underlying streaming dataset."""
        self.__dataset__.set_epoch(epoch)

    @staticmethod
    def _select_split(dataset_splits, split: Optional[str]):
        """Return `split` from a DatasetDict-like mapping, falling back to 'train'.

        Shared by the file-based loaders (from_csv / from_json / from_parquet).
        """
        if split not in dataset_splits:
            return dataset_splits.get("train")
        return dataset_splits.get(split)

    @classmethod
    def from_csv(
        cls,
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, List[str]]] = None,
        names: Optional[List[str]] = None,
        sep: Optional[str] = "\t",
        split: Optional[str] = None,
    ):
        """
        A classmethod of load csv/tsv/text files dataset
        Args:
            data_dir: defining the data_dir of the dataset configuration.
            data_files: path(s) to source data file(s).
            names: header names to data file(s).
            sep: separator for text file(s).
            split: which split of the data to load.
        Returns: return a streaming dataset (None when data_files is absent or missing on disk).
        """
        if data_files is None:
            return
        if isinstance(data_files, str) and not os.path.exists(data_files):
            return
        __dataset__ = load_dataset(
            "csv",
            data_dir=data_dir,
            data_files=data_files,
            delimiter=sep,
            column_names=names,
            quoting=3,  # 3 == csv.QUOTE_NONE: treat quote characters literally
            streaming=True,
        )
        return cls(dataset=cls._select_split(__dataset__, split))

    @classmethod
    def from_json(
        cls,
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, List[str]]] = None,
        field: Optional[str] = None,
        split: Optional[str] = None,
    ):
        """
        A classmethod of load json files dataset
        Args:
            data_dir: defining the data_dir of the dataset configuration.
            data_files: path(s) to source data file(s).
            field: specify the field to load in json file.
            split: which split of the data to load.
        Returns: return a streaming dataset (None when data_files is absent or missing on disk).
        """
        if data_files is None:
            return
        if isinstance(data_files, str) and not os.path.exists(data_files):
            return
        __dataset__ = load_dataset(
            "json",
            data_dir=data_dir,
            data_files=data_files,
            field=field,
            streaming=True,
        )
        return cls(dataset=cls._select_split(__dataset__, split))

    @classmethod
    def from_parquet(
        cls,
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, List[str]]] = None,
        split: Optional[str] = None,
    ):
        """
        A classmethod of load parquet files dataset
        Args:
            data_dir: defining the data_dir of the dataset configuration.
            data_files: path(s) to source data file(s).
            split: which split of the data to load.
        Returns: return a streaming dataset (None when data_files is absent or missing on disk).
        """
        if data_files is None:
            return
        if isinstance(data_files, str) and not os.path.exists(data_files):
            return
        __dataset__ = load_dataset(
            "parquet",
            data_dir=data_dir,
            data_files=data_files,
            streaming=True,
        )
        return cls(dataset=cls._select_split(__dataset__, split))

    @classmethod
    def from_hub(
        cls,
        data_name,
        config_name: Optional[str] = None,
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, List[str]]] = None,
        split: Optional[str] = None,
    ):
        """
        A classmethod of load hf hub dataset
        Args:
            data_name: a dataset repository on the hf hub.
            config_name: defining the name of the dataset configuration.
            data_dir: defining the data_dir of the dataset configuration.
            data_files: path(s) to source data file(s).
            split: which split of the data to load.
        Returns: return a streaming dataset
        """
        __dataset__ = load_dataset(
            data_name,
            name=config_name,
            data_dir=data_dir,
            data_files=data_files,
            streaming=True,
        )
        # NOTE(review): unlike the file-based loaders there is no fallback to
        # "train" here; an unknown split keeps the whole DatasetDict. Confirm
        # this asymmetry is intentional.
        if split in __dataset__:
            __dataset__ = __dataset__.get(split)
        return cls(dataset=__dataset__)

    @property
    def dataset(self):
        """
        The property of actual hf dataset
        """
        return self.__dataset__

    def __iter__(self):
        # cycle(): restart the stream indefinitely once it is exhausted.
        for row_data in cycle(self.dataset):
            yield row_data
| 29.991124
| 78
| 0.571175
| 1,157
| 10,137
| 4.672429
| 0.092481
| 0.083241
| 0.052719
| 0.046245
| 0.896041
| 0.893452
| 0.893452
| 0.893452
| 0.893452
| 0.893452
| 0
| 0.000303
| 0.348427
| 10,137
| 337
| 79
| 30.080119
| 0.818168
| 0.27444
| 0
| 0.848341
| 0
| 0
| 0.009305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075829
| false
| 0
| 0.033175
| 0.009479
| 0.232227
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9102167ee42ae758ad7af69f12cf0fedd98e286f
| 1,917
|
py
|
Python
|
cdad/cdadmap/migrations/0035_auto_20150921_1706.py
|
NiJeLorg/CDADMap
|
1f03dccf57951748155a0094a5aec3253183c412
|
[
"MIT"
] | null | null | null |
cdad/cdadmap/migrations/0035_auto_20150921_1706.py
|
NiJeLorg/CDADMap
|
1f03dccf57951748155a0094a5aec3253183c412
|
[
"MIT"
] | null | null | null |
cdad/cdadmap/migrations/0035_auto_20150921_1706.py
|
NiJeLorg/CDADMap
|
1f03dccf57951748155a0094a5aec3253183c412
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Re-declare the auto primary key on every panel/model touched by 0034."""

    dependencies = [
        ('cdadmap', '0034_auto_20150818_1600'),
    ]

    # Every (model, field) pair receives the identical AutoField alteration.
    _altered_pk_fields = [
        ('contact', 'id'),
        ('contactpanel', 'id'),
        ('location', 'id'),
        ('locationpanel', 'idlocation'),
        ('meeting', 'id'),
        ('meetingpanel', 'id'),
        ('survey', 'id'),
        ('surveypanel', 'id'),
    ]

    operations = [
        migrations.AlterField(
            model_name=model_name,
            name=field_name,
            field=models.AutoField(serialize=False, primary_key=True),
            preserve_default=True,
        )
        for model_name, field_name in _altered_pk_fields
    ]
| 30.428571
| 70
| 0.568075
| 169
| 1,917
| 6.254438
| 0.254438
| 0.151372
| 0.189215
| 0.219489
| 0.738884
| 0.738884
| 0.738884
| 0.738884
| 0.738884
| 0.738884
| 0
| 0.013138
| 0.324987
| 1,917
| 62
| 71
| 30.919355
| 0.803709
| 0.010955
| 0
| 0.696429
| 0
| 0
| 0.068638
| 0.012144
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035714
| 0
| 0.089286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
913b26f084763eed5e458406fe835dc766926cf2
| 10,206
|
py
|
Python
|
tests/test_graphics.py
|
andrewlavaia/Traffic-Simulator
|
39c21e94ff3026954f1577a8f9e70c6d605cb286
|
[
"MIT"
] | null | null | null |
tests/test_graphics.py
|
andrewlavaia/Traffic-Simulator
|
39c21e94ff3026954f1577a8f9e70c6d605cb286
|
[
"MIT"
] | null | null | null |
tests/test_graphics.py
|
andrewlavaia/Traffic-Simulator
|
39c21e94ff3026954f1577a8f9e70c6d605cb286
|
[
"MIT"
] | null | null | null |
import unittest
import graphics
class TestConvertPointToViewFraction(unittest.TestCase):
    """convertPointToViewFraction should map world points onto [0, 1] view fractions."""

    def setUp(self):
        self.window = graphics.GraphWin('Test', 1000, 1000)

    def test_x0_y0(self):
        # The world origin sits at the centre of the scroll region.
        result = tuple(self.window.convertPointToViewFraction(0, 0))
        self.assertEqual(result, (0.5, 0.5))

    def test_min_scrollregion(self):
        min_x = -self.window.scrollregion_x / 2
        min_y = -self.window.scrollregion_y / 2
        result = tuple(self.window.convertPointToViewFraction(min_x, min_y))
        self.assertEqual(result, (0, 0))

    def test_max_scrollregion(self):
        max_x = self.window.scrollregion_x / 2
        max_y = self.window.scrollregion_y / 2
        result = tuple(self.window.convertPointToViewFraction(max_x, max_y))
        self.assertEqual(result, (1.0, 1.0))
class TestGetCoords(unittest.TestCase):
    """getCoords should report the visible world-coordinate rectangle for view/zoom states."""

    def setUp(self):
        # 1024x768 window centred on the scroll region with no zoom applied.
        self.window = graphics.GraphWin('Test', 1024, 768)
        self.window.xview_moveto(0.5)
        self.window.yview_moveto(0.5)
        self.window.zoom_factor = 1.0

    def test_coords_with_default_view(self):
        actual = tuple(self.window.getCoords())
        expected = (0, 768, 1024, 0)
        self.assertEqual(actual, expected)

    def test_coords_with_default_view_after_2x_zoom(self):
        self.window.zoom_factor = 2.0
        actual = tuple(self.window.getCoords())
        # Zooming trims half the half-extent from each edge of the visible rect.
        x_adj = (self.window.width/2)/self.window.zoom_factor
        y_adj = (self.window.height/2)/self.window.zoom_factor
        expected = (x_adj, 768 - y_adj, 1024 - x_adj, y_adj)
        self.assertEqual(actual, expected)

    def test_coords_with_max_view(self):
        self.window.xview_moveto(1.0)
        self.window.yview_moveto(1.0)
        actual = tuple(self.window.getCoords())
        expected = (0, 768, 1024, 0)
        self.assertEqual(actual, expected)

    def test_coords_with_max_view_after_2x_zoom(self):
        self.window.xview_moveto(0.0)
        self.window.yview_moveto(0.0)
        self.window.zoom_factor = 2.0
        actual = tuple(self.window.getCoords())
        x_adj = (self.window.width/2)/self.window.zoom_factor
        y_adj = (self.window.height/2)/self.window.zoom_factor
        expected = (x_adj, 768 - y_adj, 1024 - x_adj, y_adj)
        self.assertEqual(actual, expected)

    def test_coords_with_min_view(self):
        self.window.xview_moveto(0.0)
        self.window.yview_moveto(0.0)
        actual = tuple(self.window.getCoords())
        expected = (0, 768, 1024, 0)
        self.assertEqual(actual, expected)

    def test_coords_with_min_view_after_2x_zoom(self):
        self.window.xview_moveto(1.0)
        self.window.yview_moveto(1.0)
        self.window.zoom_factor = 2.0
        actual = tuple(self.window.getCoords())
        x_adj = (self.window.width/2)/self.window.zoom_factor
        y_adj = (self.window.height/2)/self.window.zoom_factor
        expected = (x_adj, 768 - y_adj, 1024 - x_adj, y_adj)
        self.assertEqual(actual, expected)
class TestSetCoords(unittest.TestCase):
    """Re-applying the current coords via setCoords must not move the view point."""

    def setUp(self):
        self.window = graphics.GraphWin('Test', 1024, 768)
        self.window.xview_moveto(0.5)
        self.window.yview_moveto(0.5)
        self.window.zoom_factor = 1.0

    def test_no_change_to_view_fraction_after_zoom(self):
        self.window.zoom_factor = 2.0
        current_coords = self.window.getCoords()
        self.window.setCoords(*current_coords)
        view_point = tuple(self.window.getViewPoint())
        self.assertEqual(view_point, (0, 0))
class TestToScreen(unittest.TestCase):
    """Tests for GraphWin.toScreen: world coordinates -> screen coordinates."""

    def setUp(self):
        # 1024x768 window centred on the scroll region with no zoom applied.
        self.window = graphics.GraphWin('Test', 1024, 768)
        self.window.xview_moveto(0.5)
        self.window.yview_moveto(0.5)
        self.window.zoom_factor = 1.0

    def test_00_toscreen_with_default_view_and_default_zoom(self):
        """The world origin maps to the screen origin under the default view."""
        actual = self.window.toScreen(0, 0)
        expected = (0, 0)
        self.assertEqual(actual, expected)

    # TODO find out why this test doesn't work
    # def test_00_to_screen_with_default_view_and_2x_zoom(self):
    # self.window.zoom_factor = 2.0
    # self.window.setCoords(*self.window.getCoords())
    # print(self.window.getCoords())
    # actual = self.window.toScreen(0, 0)
    # expected = (-255, -190) # almost equal -256, -192
    # self.assertEqual(actual, expected)
    # actual = self.window.toScreen(512, 384)
    # expected = (0, 0)
    # self.assertEqual(actual, expected)
    # print(self.window.xview())
    # print(self.window.yview())
    # print(self.window.getCoords())
    # print(self.window.toScreen(0, 0))
    # print(self.window.toScreen(256, 192))
    # self.window.centerScreenOnPoint(graphics.Point(1024, 768))
    # print(self.window.toScreen(256, 192))
    # print(self.window.toScreen(1024, 768))
class TestToWorld(unittest.TestCase):
    """toWorld should invert toScreen's world-to-screen mapping."""

    def setUp(self):
        self.window = graphics.GraphWin('Test', 1024, 768)
        self.window.xview_moveto(0.5)
        self.window.yview_moveto(0.5)
        self.window.zoom_factor = 1.0

    def test_00_toworld_with_default_view_and_default_zoom(self):
        result = self.window.toWorld(0, 0)
        self.assertEqual(result, (0, 0))

    def test_no_change_from_screen_to_world_and_back(self):
        # Round-trip a point: world -> screen -> world should land where it started.
        world_x, world_y = self.window.toWorld(400, 400)
        screen_x, screen_y = self.window.toScreen(world_x, world_y)
        round_x, round_y = self.window.toWorld(screen_x, screen_y)
        self.assertEqual((int(round_x), int(round_y)), (400, 400))
class TestZoomAdj(unittest.TestCase):
    """getZoomAdj should report the coordinate offset introduced by the zoom factor."""

    def setUp(self):
        self.window = graphics.GraphWin('Test', 1024, 768)
        self.window.xview_moveto(0.5)
        self.window.yview_moveto(0.5)
        self.window.zoom_factor = 1.0

    def test_zoom_adj_no_zoom(self):
        self.assertEqual(tuple(self.window.getZoomAdj()), (0, 0))

    def test_zoom_adj_2x_zoom(self):
        self.window.zoom_factor = 2.0
        self.assertEqual(tuple(self.window.getZoomAdj()), (256, 192))

    def test_zoom_adj_half_zoom(self):
        self.window.zoom_factor = 0.5
        self.assertEqual(tuple(self.window.getZoomAdj()), (-512, -384))

    def test_zoom_adj_same_before_and_after_zoom(self):
        # Bump the zoom, check the offset, undo it, and expect the offset to vanish.
        self.window.zoom_factor += 0.1
        adjustment = tuple(self.window.getZoomAdj())
        for want, got in zip((46.54545, 34.90909), adjustment):
            self.assertAlmostEqual(want, got, places=5)
        self.window.zoom_factor -= 0.1
        adjustment = tuple(self.window.getZoomAdj())
        for want, got in zip((0, 0), adjustment):
            self.assertAlmostEqual(want, got, places=5)
class TestcenterScreenOnPoint(unittest.TestCase):
    """Tests for GraphWin.centerScreenOnPoint under various zoom/centering orders."""

    def setUp(self):
        # 1024x768 window centred on the scroll region; last_*view snapshots are
        # kept in sync because the window code reads them to compute view deltas.
        self.window = graphics.GraphWin('Test', 1024, 768)
        self.window.xview_moveto(0.5)
        self.window.yview_moveto(0.5)
        self.window.last_xview = self.window.xview()
        self.window.last_yview = self.window.yview()
        self.window.zoom_factor = 1.0

    def test_center_view_on_center_point(self):
        """Centering on the scroll-region centre leaves the view point at the origin."""
        self.window.centerScreenOnPoint(512, 384)
        self.window.last_xview = self.window.xview()
        self.window.last_yview = self.window.yview()
        actual = tuple(self.window.getViewPoint())
        expected = (0, 0)
        self.assertEqual(actual, expected)
        # confirm view fraction hasn't changed
        xview = self.window.xview()
        yview = self.window.yview()
        actual = (xview[0], yview[0])
        expected = (0.5, 0.5)
        self.assertEqual(actual, expected)

    def test_center_view_on_00(self):
        """Centering on the origin shifts the view point by half the window size."""
        self.window.centerScreenOnPoint(0, 0)
        self.window.last_xview = self.window.xview()
        self.window.last_yview = self.window.yview()
        actual = tuple(self.window.getViewPoint())
        expected = (-512, -384)
        self.assertEqual(actual, expected)

    def test_center_view_on_00_after_2x_zoom(self):
        """Zoom first, then centre: the centre view point tracks the target."""
        self.window.zoom_factor = 2.0
        self.window.centerScreenOnPoint(0, 0)
        self.window.last_xview = self.window.xview()
        self.window.last_yview = self.window.yview()
        actual = tuple(self.window.getCenterViewPoint())
        expected = (0, 0)
        self.assertEqual(actual, expected)

    def test_center_view_on_00_before_2x_zoom(self):
        """Centre first, then zoom: the centre view point must stay on the target."""
        self.window.centerScreenOnPoint(0, 0)
        self.window.zoom_factor = 2.0
        self.window.last_xview = self.window.xview()
        self.window.last_yview = self.window.yview()
        actual = tuple(self.window.getCenterViewPoint())
        expected = (0, 0)
        self.assertEqual(actual, expected)

    def test_center_view_on_offset_before_2x_zoom(self):
        """Centre on a corner, then zoom: view point scales with the zoom factor."""
        self.window.centerScreenOnPoint(1024, 768)
        self.window.last_xview = self.window.xview()
        self.window.last_yview = self.window.yview()
        actual = tuple(self.window.getCenterViewPoint())
        expected = (1024, 768)
        self.assertEqual(actual, expected)
        self.window.zoom_factor = 2.0
        actual = tuple(self.window.getViewPoint())
        expected = (512, 384)
        self.assertEqual(actual, expected)

    def test_center_view_on_offset_after_2x_zoom(self):
        """Zoom first, then centre on a corner: same resulting view state."""
        self.window.zoom_factor = 2.0
        self.window.centerScreenOnPoint(1024, 768)
        self.window.last_xview = self.window.xview()
        self.window.last_yview = self.window.yview()
        actual = tuple(self.window.getCenterViewPoint())
        expected = (1024, 768)
        self.assertEqual(actual, expected)
        actual = tuple(self.window.getViewPoint())
        expected = (512, 384)
        self.assertEqual(actual, expected)
| 37.384615
| 68
| 0.64903
| 1,330
| 10,206
| 4.809023
| 0.082707
| 0.232958
| 0.063477
| 0.090682
| 0.876485
| 0.855847
| 0.814415
| 0.7894
| 0.766573
| 0.715447
| 0
| 0.052042
| 0.229963
| 10,206
| 272
| 69
| 37.522059
| 0.761802
| 0.080345
| 0
| 0.727273
| 0
| 0
| 0.00299
| 0
| 0
| 0
| 0
| 0.003676
| 0.138756
| 1
| 0.143541
| false
| 0
| 0.009569
| 0
| 0.186603
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9141c9e2960d8366841b7cf8a6162aed8112cded
| 25,379
|
py
|
Python
|
sdk/python/pulumi_aws/cloudwatch/event_connection.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/cloudwatch/event_connection.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/cloudwatch/event_connection.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['EventConnectionArgs', 'EventConnection']
@pulumi.input_type
class EventConnectionArgs:
    """Input arguments accepted when constructing an EventConnection resource.

    NOTE: generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    def __init__(__self__, *,
                 auth_parameters: pulumi.Input['EventConnectionAuthParametersArgs'],
                 authorization_type: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a EventConnection resource.
        :param pulumi.Input['EventConnectionAuthParametersArgs'] auth_parameters: Parameters used for authorization. A maximum of 1 are allowed. Documented below.
        :param pulumi.Input[str] authorization_type: Choose the type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`.
        :param pulumi.Input[str] description: Enter a description for the connection. Maximum of 512 characters.
        :param pulumi.Input[str] name: The name of the new connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_.
        """
        pulumi.set(__self__, "auth_parameters", auth_parameters)
        pulumi.set(__self__, "authorization_type", authorization_type)
        # Optional fields are only stored when explicitly provided.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="authParameters")
    def auth_parameters(self) -> pulumi.Input['EventConnectionAuthParametersArgs']:
        """
        Parameters used for authorization. A maximum of 1 are allowed. Documented below.
        """
        return pulumi.get(self, "auth_parameters")

    @auth_parameters.setter
    def auth_parameters(self, value: pulumi.Input['EventConnectionAuthParametersArgs']):
        pulumi.set(self, "auth_parameters", value)

    @property
    @pulumi.getter(name="authorizationType")
    def authorization_type(self) -> pulumi.Input[str]:
        """
        Choose the type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`.
        """
        return pulumi.get(self, "authorization_type")

    @authorization_type.setter
    def authorization_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "authorization_type", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Enter a description for the connection. Maximum of 512 characters.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the new connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _EventConnectionState:
    """Internal state shape used when looking up / importing an EventConnection.

    NOTE: generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 auth_parameters: Optional[pulumi.Input['EventConnectionAuthParametersArgs']] = None,
                 authorization_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 secret_arn: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering EventConnection resources.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the connection.
        :param pulumi.Input['EventConnectionAuthParametersArgs'] auth_parameters: Parameters used for authorization. A maximum of 1 are allowed. Documented below.
        :param pulumi.Input[str] authorization_type: Choose the type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`.
        :param pulumi.Input[str] description: Enter a description for the connection. Maximum of 512 characters.
        :param pulumi.Input[str] name: The name of the new connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_.
        :param pulumi.Input[str] secret_arn: The Amazon Resource Name (ARN) of the secret created from the authorization parameters specified for the connection.
        """
        # Every state field is optional; only provided values are stored.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if auth_parameters is not None:
            pulumi.set(__self__, "auth_parameters", auth_parameters)
        if authorization_type is not None:
            pulumi.set(__self__, "authorization_type", authorization_type)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if secret_arn is not None:
            pulumi.set(__self__, "secret_arn", secret_arn)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of the connection.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter(name="authParameters")
    def auth_parameters(self) -> Optional[pulumi.Input['EventConnectionAuthParametersArgs']]:
        """
        Parameters used for authorization. A maximum of 1 are allowed. Documented below.
        """
        return pulumi.get(self, "auth_parameters")

    @auth_parameters.setter
    def auth_parameters(self, value: Optional[pulumi.Input['EventConnectionAuthParametersArgs']]):
        pulumi.set(self, "auth_parameters", value)

    @property
    @pulumi.getter(name="authorizationType")
    def authorization_type(self) -> Optional[pulumi.Input[str]]:
        """
        Choose the type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`.
        """
        return pulumi.get(self, "authorization_type")

    @authorization_type.setter
    def authorization_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "authorization_type", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Enter a description for the connection. Maximum of 512 characters.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the new connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="secretArn")
    def secret_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of the secret created from the authorization parameters specified for the connection.
        """
        return pulumi.get(self, "secret_arn")

    @secret_arn.setter
    def secret_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_arn", value)
class EventConnection(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 auth_parameters: Optional[pulumi.Input[pulumi.InputType['EventConnectionAuthParametersArgs']]] = None,
                 authorization_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides an EventBridge connection resource.

        > **Note:** EventBridge was formerly known as CloudWatch Events. The functionality is identical.

        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws

        test = aws.cloudwatch.EventConnection("test",
            auth_parameters=aws.cloudwatch.EventConnectionAuthParametersArgs(
                api_key={
                    "key": "x-signature",
                    "value": "1234",
                },
            ),
            authorization_type="API_KEY",
            description="A connection description")
        ```
        ### Basic Authorization
        ```python
        import pulumi
        import pulumi_aws as aws

        test = aws.cloudwatch.EventConnection("test",
            auth_parameters=aws.cloudwatch.EventConnectionAuthParametersArgs(
                basic=aws.cloudwatch.EventConnectionAuthParametersBasicArgs(
                    password="Pass1234!",
                    username="user",
                ),
            ),
            authorization_type="BASIC",
            description="A connection description")
        ```
        ### OAuth Authorization
        ```python
        import pulumi
        import pulumi_aws as aws

        test = aws.cloudwatch.EventConnection("test",
            auth_parameters=aws.cloudwatch.EventConnectionAuthParametersArgs(
                oauth=aws.cloudwatch.EventConnectionAuthParametersOauthArgs(
                    authorization_endpoint="https://auth.url.com/endpoint",
                    client_parameters=aws.cloudwatch.EventConnectionAuthParametersOauthClientParametersArgs(
                        client_id="1234567890",
                        client_secret="Pass1234!",
                    ),
                    http_method="GET",
                    oauth_http_parameters=aws.cloudwatch.EventConnectionAuthParametersOauthOauthHttpParametersArgs(
                        body=[{
                            "isValueSecret": False,
                            "key": "body-parameter-key",
                            "value": "body-parameter-value",
                        }],
                        header=[{
                            "isValueSecret": False,
                            "key": "header-parameter-key",
                            "value": "header-parameter-value",
                        }],
                        query_string=[{
                            "isValueSecret": False,
                            "key": "query-string-parameter-key",
                            "value": "query-string-parameter-value",
                        }],
                    ),
                ),
            ),
            authorization_type="OAUTH_CLIENT_CREDENTIALS",
            description="A connection description")
        ```
        ### Invocation Http Parameters
        ```python
        import pulumi
        import pulumi_aws as aws

        test = aws.cloudwatch.EventConnection("test",
            auth_parameters=aws.cloudwatch.EventConnectionAuthParametersArgs(
                basic=aws.cloudwatch.EventConnectionAuthParametersBasicArgs(
                    password="Pass1234!",
                    username="user",
                ),
                invocation_http_parameters=aws.cloudwatch.EventConnectionAuthParametersInvocationHttpParametersArgs(
                    body=[
                        {
                            "isValueSecret": False,
                            "key": "body-parameter-key",
                            "value": "body-parameter-value",
                        },
                        {
                            "isValueSecret": True,
                            "key": "body-parameter-key2",
                            "value": "body-parameter-value2",
                        },
                    ],
                    header=[{
                        "isValueSecret": False,
                        "key": "header-parameter-key",
                        "value": "header-parameter-value",
                    }],
                    query_string=[{
                        "isValueSecret": False,
                        "key": "query-string-parameter-key",
                        "value": "query-string-parameter-value",
                    }],
                ),
            ),
            authorization_type="BASIC",
            description="A connection description")
        ```

        ## Import

        EventBridge Connection can be imported using the `name`, e.g. console

        ```sh
        $ pulumi import aws:cloudwatch/eventConnection:EventConnection test ngrok-connection
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['EventConnectionAuthParametersArgs']] auth_parameters: Parameters used for authorization. A maximum of 1 are allowed. Documented below.
        :param pulumi.Input[str] authorization_type: Choose the type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`.
        :param pulumi.Input[str] description: Enter a description for the connection. Maximum of 512 characters.
        :param pulumi.Input[str] name: The name of the new connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_.
        """
        ...
@overload
def __init__(__self__,
             resource_name: str,
             args: EventConnectionArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Provides an EventBridge connection resource.

    > **Note:** EventBridge was formerly known as CloudWatch Events. The functionality is identical.

    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws

    test = aws.cloudwatch.EventConnection("test",
        auth_parameters=aws.cloudwatch.EventConnectionAuthParametersArgs(
            api_key={
                "key": "x-signature",
                "value": "1234",
            },
        ),
        authorization_type="API_KEY",
        description="A connection description")
    ```
    ### Basic Authorization
    ```python
    import pulumi
    import pulumi_aws as aws

    test = aws.cloudwatch.EventConnection("test",
        auth_parameters=aws.cloudwatch.EventConnectionAuthParametersArgs(
            basic=aws.cloudwatch.EventConnectionAuthParametersBasicArgs(
                password="Pass1234!",
                username="user",
            ),
        ),
        authorization_type="BASIC",
        description="A connection description")
    ```
    ### OAuth Authorization
    ```python
    import pulumi
    import pulumi_aws as aws

    test = aws.cloudwatch.EventConnection("test",
        auth_parameters=aws.cloudwatch.EventConnectionAuthParametersArgs(
            oauth=aws.cloudwatch.EventConnectionAuthParametersOauthArgs(
                authorization_endpoint="https://auth.url.com/endpoint",
                client_parameters=aws.cloudwatch.EventConnectionAuthParametersOauthClientParametersArgs(
                    client_id="1234567890",
                    client_secret="Pass1234!",
                ),
                http_method="GET",
                oauth_http_parameters=aws.cloudwatch.EventConnectionAuthParametersOauthOauthHttpParametersArgs(
                    body=[{
                        "isValueSecret": False,
                        "key": "body-parameter-key",
                        "value": "body-parameter-value",
                    }],
                    header=[{
                        "isValueSecret": False,
                        "key": "header-parameter-key",
                        "value": "header-parameter-value",
                    }],
                    query_string=[{
                        "isValueSecret": False,
                        "key": "query-string-parameter-key",
                        "value": "query-string-parameter-value",
                    }],
                ),
            ),
        ),
        authorization_type="OAUTH_CLIENT_CREDENTIALS",
        description="A connection description")
    ```
    ### Invocation Http Parameters
    ```python
    import pulumi
    import pulumi_aws as aws

    test = aws.cloudwatch.EventConnection("test",
        auth_parameters=aws.cloudwatch.EventConnectionAuthParametersArgs(
            basic=aws.cloudwatch.EventConnectionAuthParametersBasicArgs(
                password="Pass1234!",
                username="user",
            ),
            invocation_http_parameters=aws.cloudwatch.EventConnectionAuthParametersInvocationHttpParametersArgs(
                body=[
                    {
                        "isValueSecret": False,
                        "key": "body-parameter-key",
                        "value": "body-parameter-value",
                    },
                    {
                        "isValueSecret": True,
                        "key": "body-parameter-key2",
                        "value": "body-parameter-value2",
                    },
                ],
                header=[{
                    "isValueSecret": False,
                    "key": "header-parameter-key",
                    "value": "header-parameter-value",
                }],
                query_string=[{
                    "isValueSecret": False,
                    "key": "query-string-parameter-key",
                    "value": "query-string-parameter-value",
                }],
            ),
        ),
        authorization_type="BASIC",
        description="A connection description")
    ```

    ## Import

    EventBridge Connection can be imported using the `name`, e.g. console

    ```sh
    $ pulumi import aws:cloudwatch/eventConnection:EventConnection test ngrok-connection
    ```

    :param str resource_name: The name of the resource.
    :param EventConnectionArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch to `_internal_init`, accepting either an `EventConnectionArgs`
    bundle or the individual keyword arguments declared by the overloads."""
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(
        EventConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # Caller used the keyword-argument overload; forward everything as-is.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Caller used the args-object overload; expand it into keyword arguments.
        __self__._internal_init(resource_name, parsed_opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   auth_parameters: Optional[pulumi.Input[pulumi.InputType['EventConnectionAuthParametersArgs']]] = None,
                   authorization_type: Optional[pulumi.Input[str]] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    # Shared implementation behind both __init__ overloads: validates the
    # options, builds the property bag, and registers the resource.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource (no existing id): __props__ must not be supplied.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = EventConnectionArgs.__new__(EventConnectionArgs)

        # Required inputs are only enforced when not rehydrating from a URN.
        if auth_parameters is None and not opts.urn:
            raise TypeError("Missing required property 'auth_parameters'")
        __props__.__dict__["auth_parameters"] = auth_parameters
        if authorization_type is None and not opts.urn:
            raise TypeError("Missing required property 'authorization_type'")
        __props__.__dict__["authorization_type"] = authorization_type
        __props__.__dict__["description"] = description
        __props__.__dict__["name"] = name
        # Output-only properties start as None and are resolved by the engine.
        __props__.__dict__["arn"] = None
        __props__.__dict__["secret_arn"] = None
    super(EventConnection, __self__).__init__(
        'aws:cloudwatch/eventConnection:EventConnection',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        arn: Optional[pulumi.Input[str]] = None,
        auth_parameters: Optional[pulumi.Input[pulumi.InputType['EventConnectionAuthParametersArgs']]] = None,
        authorization_type: Optional[pulumi.Input[str]] = None,
        description: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        secret_arn: Optional[pulumi.Input[str]] = None) -> 'EventConnection':
    """
    Get an existing EventConnection resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the connection.
    :param pulumi.Input[pulumi.InputType['EventConnectionAuthParametersArgs']] auth_parameters: Parameters used for authorization. A maximum of 1 are allowed. Documented below.
    :param pulumi.Input[str] authorization_type: Choose the type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`.
    :param pulumi.Input[str] description: Enter a description for the connection. Maximum of 512 characters.
    :param pulumi.Input[str] name: The name of the new connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_.
    :param pulumi.Input[str] secret_arn: The Amazon Resource Name (ARN) of the secret created from the authorization parameters specified for the connection.
    """
    # Attach the provider id so the engine performs a read instead of a create.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    __props__ = _EventConnectionState.__new__(_EventConnectionState)

    __props__.__dict__["arn"] = arn
    __props__.__dict__["auth_parameters"] = auth_parameters
    __props__.__dict__["authorization_type"] = authorization_type
    __props__.__dict__["description"] = description
    __props__.__dict__["name"] = name
    __props__.__dict__["secret_arn"] = secret_arn
    return EventConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
    """
    The Amazon Resource Name (ARN) of the connection.
    """
    return pulumi.get(self, "arn")
@property
@pulumi.getter(name="authParameters")
def auth_parameters(self) -> pulumi.Output['outputs.EventConnectionAuthParameters']:
    """
    Parameters used for authorization. A maximum of 1 are allowed. Documented below.
    """
    return pulumi.get(self, "auth_parameters")
@property
@pulumi.getter(name="authorizationType")
def authorization_type(self) -> pulumi.Output[str]:
    """
    Choose the type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`.
    """
    return pulumi.get(self, "authorization_type")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
    """
    Enter a description for the connection. Maximum of 512 characters.
    """
    return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    The name of the new connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_.
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="secretArn")
def secret_arn(self) -> pulumi.Output[str]:
    """
    The Amazon Resource Name (ARN) of the secret created from the authorization parameters specified for the connection.
    """
    return pulumi.get(self, "secret_arn")
| 43.088285
| 180
| 0.594389
| 2,376
| 25,379
| 6.1633
| 0.090909
| 0.051079
| 0.050669
| 0.048074
| 0.861718
| 0.847514
| 0.832696
| 0.820199
| 0.810161
| 0.79985
| 0
| 0.005646
| 0.309035
| 25,379
| 588
| 181
| 43.161565
| 0.829437
| 0.504787
| 0
| 0.606635
| 1
| 0
| 0.127848
| 0.037486
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156398
| false
| 0.004739
| 0.033175
| 0
| 0.28436
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e67cacc08186edac6600aafed006c73951ae8925
| 30
|
py
|
Python
|
api/handlers/general.py
|
bharadwaj-pendyala/opentestdata-api
|
ff388ff2dd6d43f45e11cb5689d06ba257b23f09
|
[
"MIT"
] | 15
|
2019-06-27T02:48:02.000Z
|
2020-11-29T09:01:29.000Z
|
api/handlers/general.py
|
bharadwaj-pendyala/opentestdata-api
|
ff388ff2dd6d43f45e11cb5689d06ba257b23f09
|
[
"MIT"
] | 16
|
2019-07-26T19:51:55.000Z
|
2022-03-12T00:00:24.000Z
|
api/handlers/general.py
|
bharadwaj-pendyala/opentestdata-api
|
ff388ff2dd6d43f45e11cb5689d06ba257b23f09
|
[
"MIT"
] | 7
|
2019-06-26T11:10:50.000Z
|
2020-09-04T08:52:58.000Z
|
def ping():
    """Liveness-probe handler: always answers with the literal 'PONG'."""
    response = 'PONG'
    return response
| 10
| 17
| 0.566667
| 4
| 30
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 30
| 2
| 18
| 15
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e6825388c51b876b3c048cfcbfa026d33629c0a3
| 22,683
|
py
|
Python
|
testcases/trades_analyzer_test.py
|
daxlab/pyalgotrade
|
5517c2644da97e7ef143d344d813232d6845a29f
|
[
"Apache-2.0"
] | 1,000
|
2016-01-26T12:10:11.000Z
|
2022-03-01T23:59:50.000Z
|
testcases/trades_analyzer_test.py
|
daxlab/pyalgotrade
|
5517c2644da97e7ef143d344d813232d6845a29f
|
[
"Apache-2.0"
] | 22
|
2016-01-26T15:14:09.000Z
|
2019-01-30T02:36:38.000Z
|
testcases/trades_analyzer_test.py
|
daxlab/pyalgotrade
|
5517c2644da97e7ef143d344d813232d6845a29f
|
[
"Apache-2.0"
] | 613
|
2016-01-27T01:02:30.000Z
|
2022-03-21T01:38:58.000Z
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
import math
from distutils import version
import pytz
import numpy
import common
import strategy_test
import position_test
from pyalgotrade.barfeed import ninjatraderfeed
from pyalgotrade.barfeed import csvfeed
from pyalgotrade.stratanalyzer import trades
from pyalgotrade import broker
from pyalgotrade.broker import backtesting
def buildUTCDateTime(year, month, day, hour, minute):
    """Build a pytz-localized, timezone-aware UTC datetime from discrete components."""
    naive = datetime.datetime(year, month, day, hour, minute)
    return pytz.utc.localize(naive)
class TradesAnalyzerTestCase(common.TestCase):
    # Single instrument used by every test in this case.
    TestInstrument = "spy"

    def __loadBarFeed(self):
        """Load the minute SPY bars for 2011, filtered to US equities regular trading hours."""
        ret = ninjatraderfeed.Feed(ninjatraderfeed.Frequency.MINUTE)
        barFilter = csvfeed.USEquitiesRTH()
        ret.setBarFilter(barFilter)
        ret.addBarsFromCSV(TradesAnalyzerTestCase.TestInstrument, common.get_data_file_path("nt-spy-minute-2011.csv"))
        return ret
def __createStrategy(self):
    """Build an order-driven test strategy over the SPY minute feed with $1000 starting cash."""
    feed = self.__loadBarFeed()
    return strategy_test.TestStrategy(feed, 1000)
def __createPositionStrategy(self):
    """Build a position-driven test strategy over the SPY minute feed with $1000 starting cash."""
    feed = self.__loadBarFeed()
    return position_test.TestStrategy(feed, TradesAnalyzerTestCase.TestInstrument, 1000)
def testNoTrades(self):
    """With no orders placed, cash is untouched and every trade counter stays at zero."""
    strat = self.__createStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    strat.run()
    self.assertTrue(strat.getBroker().getCash() == 1000)

    self.assertTrue(stratAnalyzer.getCount() == 0)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)
    self.assertTrue(stratAnalyzer.getProfitableCount() == 0)
    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 0)
def testSomeTrades_Position(self):
    """Two winning trades, one losing trade, and one still-open position entered via the Position API."""
    strat = self.__createPositionStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Winning trade
    strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 0), strat.enterLong, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
    strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 16))  # 127.16
    # Losing trade
    strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 30), strat.enterLong, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
    strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 31))  # 127.16
    # Winning trade
    strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 38), strat.enterLong, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 42))  # 127.26
    # Unfinished trade not closed
    strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 47), strat.enterLong, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.34
    strat.run()

    # Cash reflects the three closed trades plus the cost of the open position.
    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.14) + (127.16 - 127.2) + (127.26 - 127.16) - 127.34, 2))

    self.assertTrue(stratAnalyzer.getCount() == 3)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)

    self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.03)
    self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 2) == 0.07)
    self.assertTrue(round(stratAnalyzer.getAll().std(ddof=0), 2) == 0.06)

    self.assertTrue(stratAnalyzer.getProfitableCount() == 2)
    self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.06)
    self.assertTrue(round(stratAnalyzer.getProfits().std(ddof=1), 2) == 0.06)
    self.assertTrue(round(stratAnalyzer.getProfits().std(ddof=0), 2) == 0.04)
    self.assertEqual(stratAnalyzer.getPositiveReturns()[0], (127.16 - 127.14) / 127.14)
    self.assertEqual(stratAnalyzer.getPositiveReturns()[1], (127.26 - 127.16) / 127.16)

    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.04)
    # numpy >= 1.6.2 returns NaN for the sample std of a single value; older versions return 0.
    if version.LooseVersion(numpy.__version__) >= version.LooseVersion("1.6.2"):
        self.assertTrue(math.isnan(stratAnalyzer.getLosses().std(ddof=1)))
    else:
        self.assertTrue(stratAnalyzer.getLosses().std(ddof=1) == 0)
    self.assertTrue(stratAnalyzer.getLosses().std(ddof=0) == 0)
    self.assertEqual(stratAnalyzer.getNegativeReturns()[0], (127.16 - 127.2) / 127.2)
def testSomeTrades(self):
    """Same trade sequence as testSomeTrades_Position, but entered with raw market orders."""
    strat = self.__createStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Winning trade
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Losing trade
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 31), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Winning trade
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 38), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 42), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.26
    # Open trade.
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 47), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.34
    strat.run()

    # Cash reflects the three closed trades plus the cost of the open position.
    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.14) + (127.16 - 127.2) + (127.26 - 127.16) - 127.34, 2))

    self.assertTrue(stratAnalyzer.getCount() == 3)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)

    self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.03)
    self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 2) == 0.07)
    self.assertTrue(round(stratAnalyzer.getAll().std(ddof=0), 2) == 0.06)

    self.assertTrue(stratAnalyzer.getProfitableCount() == 2)
    self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.06)
    self.assertTrue(round(stratAnalyzer.getProfits().std(ddof=1), 2) == 0.06)
    self.assertTrue(round(stratAnalyzer.getProfits().std(ddof=0), 2) == 0.04)

    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.04)
    # numpy >= 1.6.2 returns NaN for the sample std of a single value; older versions return 0.
    if version.LooseVersion(numpy.__version__) >= version.LooseVersion("1.6.2"):
        self.assertTrue(math.isnan(stratAnalyzer.getLosses().std(ddof=1)))
    else:
        self.assertTrue(stratAnalyzer.getLosses().std(ddof=1) == 0)
    self.assertTrue(stratAnalyzer.getLosses().std(ddof=0) == 0)
def testSomeTradesWithCommissions(self):
    """Per-trade commissions of $0.01 are attributed to each closed trade (entry + exit = $0.02)."""
    strat = self.__createStrategy()
    strat.getBroker().setCommission(backtesting.FixedPerTrade(0.01))
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Losing trade
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 31), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Winning trade
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 38), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 42), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.26
    # Open trade.
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 47), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.34
    strat.run()

    # 5 filled orders at $0.01 commission each.
    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.2) + (127.26 - 127.16) - 127.34 - 0.01*5, 2))
    self.assertTrue(numpy.array_equal(stratAnalyzer.getCommissionsForAllTrades(), numpy.array([0.02, 0.02])))
    self.assertTrue(numpy.array_equal(stratAnalyzer.getCommissionsForProfitableTrades(), numpy.array([0.02])))
    self.assertTrue(numpy.array_equal(stratAnalyzer.getCommissionsForUnprofitableTrades(), numpy.array([0.02])))
    self.assertTrue(numpy.array_equal(stratAnalyzer.getCommissionsForEvenTrades(), numpy.array([])))
def testProportionalCommissionBug(self):
    """Commissions on orders that span two trades must be split proportionally between them.

    Regression test for a bug reported by 'Jackson Sam' on 30/Aug/2013.
    """
    strat = self.__createStrategy()
    strat.getBroker().setCommission(backtesting.FixedPerTrade(0.01))
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # There are 3 trades here:
    # Trade 1 (Long)
    #   Buy 1 @ 127.16 Commission: 0.01
    #   Sell 1 @ 127.26 Commission: 0.005
    # Trade 2 (Short)
    #   Sell 1 @ 127.26 Commission: 0.005
    #   Buy 1 @ 127.37 Commission: 0.005
    # Trade 3 (Long)
    #   Buy 1 @ 127.37 Commission: 0.005
    #   Sell 1 @ 127.4 Commission: 0.01
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 38), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # Fill at 127.16
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 42), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 2)  # Fill at 127.26
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 53), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 2)  # Fill at 127.37
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 58), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # Fill at 127.4

    strat.run()

    allReturns = stratAnalyzer.getAllReturns()
    self.assertEqual(round(allReturns[0], 6), 0.000668)
    self.assertEqual(round(allReturns[1], 6), -0.000943)
    self.assertEqual(round(allReturns[2], 6), 0.000118)
def testLongShort(self):
    """A long closed and a short opened by a single oversized SELL order count as two trades."""
    strat = self.__createStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Enter long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
    # Exit long and enter short
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 2)  # 127.16
    # Exit short
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
    strat.run()

    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.14) + (127.16 - 127.2), 2))

    self.assertTrue(stratAnalyzer.getCount() == 2)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)

    self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == -0.01)
    self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 4) == 0.0424)

    self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.02)

    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.04)
def testLongShort2(self):
    """Same long-then-short sequence as testLongShort, but using separate exit/entry orders."""
    strat = self.__createStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Enter long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
    # Exit long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Enter short
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Exit short
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
    strat.run()

    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.16 - 127.14) + (127.16 - 127.2), 2))

    self.assertTrue(stratAnalyzer.getCount() == 2)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)

    self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == -0.01)
    self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 4) == 0.0424)

    self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.02)

    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.04)
def testShortLong(self):
    """A short closed and a long opened by a single oversized BUY_TO_COVER order count as two trades."""
    strat = self.__createStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Enter short
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
    # Exit short and enter long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 2)  # 127.16
    # Exit long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
    strat.run()

    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.14 - 127.16) + (127.2 - 127.16), 2))

    self.assertTrue(stratAnalyzer.getCount() == 2)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)

    self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.01)
    self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 4) == 0.0424)

    self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.04)

    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.02)
def testShortLong2(self):
    """Same short-then-long sequence as testShortLong, but using separate exit/entry orders."""
    strat = self.__createStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Enter short
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.SELL_SHORT, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
    # Exit short
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY_TO_COVER, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Enter long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Exit long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
    strat.run()

    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.14 - 127.16) + (127.2 - 127.16), 2))

    self.assertTrue(stratAnalyzer.getCount() == 2)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)

    self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.01)
    self.assertTrue(round(stratAnalyzer.getAll().std(ddof=1), 4) == 0.0424)

    self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.04)

    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getLosses().mean(), 2) == -0.02)
def testLong2(self):
    """Extending a long position and closing it all at once is analyzed as one trade."""
    strat = self.__createStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Enter long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
    # Extend long position
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Exit long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 2)  # 127.2
    strat.run()

    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.2 - 127.14) + (127.2 - 127.16), 2))

    self.assertTrue(stratAnalyzer.getCount() == 1)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)

    self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.1)

    self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.1)

    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 0)
def testLong3(self):
    """Scaling out of a long position in two sells is still analyzed as one trade."""
    strat = self.__createStrategy()
    stratAnalyzer = trades.Trades()
    strat.attachAnalyzer(stratAnalyzer)

    # Enter long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), strat.getBroker().createMarketOrder, broker.Order.Action.BUY, TradesAnalyzerTestCase.TestInstrument, 2)  # 127.14
    # Decrease long position
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
    # Exit long
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
    strat.run()

    self.assertTrue(round(strat.getBroker().getCash(), 2) == round(1000 + (127.2 - 127.14) + (127.16 - 127.14), 2))

    self.assertTrue(stratAnalyzer.getCount() == 1)
    self.assertTrue(stratAnalyzer.getEvenCount() == 0)

    self.assertTrue(round(stratAnalyzer.getAll().mean(), 2) == 0.08)

    self.assertTrue(stratAnalyzer.getProfitableCount() == 1)
    self.assertTrue(round(stratAnalyzer.getProfits().mean(), 2) == 0.08)

    self.assertTrue(stratAnalyzer.getUnprofitableCount() == 0)
def testShort2(self):
    """Build a short position with two sells and cover it with one buy; expect one losing trade."""
    strat = self.__createStrategy()
    analyzer = trades.Trades()
    strat.attachAnalyzer(analyzer)
    createMarketOrder = strat.getBroker().createMarketOrder
    instrument = TradesAnalyzerTestCase.TestInstrument

    # Enter short @ 127.14.
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), createMarketOrder, broker.Order.Action.SELL_SHORT, instrument, 1)
    # Extend short position @ 127.16.
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), createMarketOrder, broker.Order.Action.SELL_SHORT, instrument, 1)
    # Exit short @ 127.2.
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), createMarketOrder, broker.Order.Action.BUY_TO_COVER, instrument, 2)
    strat.run()

    # The cover price sits above both short entries, so the single trade loses money.
    expectedCash = round(1000 + (127.14 - 127.2) + (127.16 - 127.2), 2)
    self.assertEqual(round(strat.getBroker().getCash(), 2), expectedCash)
    self.assertEqual(analyzer.getCount(), 1)
    self.assertEqual(analyzer.getEvenCount(), 0)
    self.assertEqual(round(analyzer.getAll().mean(), 2), -0.1)
    self.assertEqual(analyzer.getUnprofitableCount(), 1)
    self.assertEqual(round(analyzer.getLosses().mean(), 2), -0.1)
    self.assertEqual(analyzer.getProfitableCount(), 0)
def testShort3(self):
    """Open a 2 share short and cover it in two 1 share buys; expect one losing trade."""
    strat = self.__createStrategy()
    analyzer = trades.Trades()
    strat.attachAnalyzer(analyzer)
    createMarketOrder = strat.getBroker().createMarketOrder
    instrument = TradesAnalyzerTestCase.TestInstrument

    # Enter short @ 127.14.
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0), createMarketOrder, broker.Order.Action.SELL_SHORT, instrument, 2)
    # Decrease short position @ 127.16.
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16), createMarketOrder, broker.Order.Action.BUY_TO_COVER, instrument, 1)
    # Exit short @ 127.2.
    strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30), createMarketOrder, broker.Order.Action.BUY_TO_COVER, instrument, 1)
    strat.run()

    # Both cover prices are above the entry, so the aggregated trade is a loss.
    expectedCash = round(1000 + (127.14 - 127.16) + (127.14 - 127.2), 2)
    self.assertEqual(round(strat.getBroker().getCash(), 2), expectedCash)
    self.assertEqual(analyzer.getCount(), 1)
    self.assertEqual(analyzer.getEvenCount(), 0)
    self.assertEqual(round(analyzer.getAll().mean(), 2), -0.08)
    self.assertEqual(analyzer.getUnprofitableCount(), 1)
    self.assertEqual(round(analyzer.getLosses().mean(), 2), -0.08)
    self.assertEqual(analyzer.getProfitableCount(), 0)
| 54.136038
| 183
| 0.687211
| 2,525
| 22,683
| 6.143366
| 0.091881
| 0.093863
| 0.066336
| 0.069495
| 0.85089
| 0.841091
| 0.815304
| 0.789131
| 0.77108
| 0.759219
| 0
| 0.079874
| 0.177049
| 22,683
| 418
| 184
| 54.26555
| 0.751112
| 0.083675
| 0
| 0.683206
| 0
| 0
| 0.001694
| 0.001065
| 0
| 0
| 0
| 0
| 0.419847
| 1
| 0.064886
| false
| 0
| 0.049618
| 0
| 0.137405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e68506b78bcb720b6951381e7024c8bc661539b4
| 13,633
|
py
|
Python
|
Python/tests/datatypes/test_timex_resolver.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | 1
|
2019-01-03T16:41:29.000Z
|
2019-01-03T16:41:29.000Z
|
Python/tests/datatypes/test_timex_resolver.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | 76
|
2018-11-09T18:19:44.000Z
|
2019-08-20T20:29:53.000Z
|
Python/tests/datatypes/test_timex_resolver.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | 6
|
2017-05-04T17:24:59.000Z
|
2019-07-23T15:48:44.000Z
|
from datatypes_timex_expression import Timex, datetime, TimexResolver
def test_datatypes_resolver_date_definite():
    """A fully specified date timex resolves to exactly one 'date' value."""
    today = datetime(2017, 9, 26, 15, 30, 0)
    resolution = TimexResolver.resolve(["2017-09-28"], today)
    assert len(resolution.values) == 1
    entry = resolution.values[0]
    assert entry.timex == "2017-09-28"
    assert entry.type == "date"
    assert entry.value == "2017-09-28"
    assert entry.start is None
    assert entry.end is None
def test_datatypes_resolver_date_saturday():
    """A weekday-only timex resolves to both the previous and the next Saturday."""
    today = datetime(2017, 9, 26, 15, 30, 0)
    resolution = TimexResolver.resolve(["XXXX-WXX-6"], today)
    assert len(resolution.values) == 2
    for entry, expected in zip(resolution.values, ["2017-09-23", "2017-09-30"]):
        assert entry.timex == "XXXX-WXX-6"
        assert entry.type == "date"
        assert entry.value == expected
        assert entry.start is None
        assert entry.end is None
def test_datatypes_resolver_date_sunday():
    """A weekday-only timex resolves to both the previous and the next Sunday."""
    today = datetime(2019, 4, 23, 15, 30, 0)
    resolution = TimexResolver.resolve(["XXXX-WXX-7"], today)
    assert len(resolution.values) == 2
    for entry, expected in zip(resolution.values, ["2019-04-21", "2019-04-28"]):
        assert entry.timex == "XXXX-WXX-7"
        assert entry.type == "date"
        assert entry.value == expected
        assert entry.start is None
        assert entry.end is None
def test_datatypes_resolver_date_wednesday_4():
    """Two weekday+hour timexes each resolve to the surrounding Wednesdays at that hour."""
    today = datetime(2017, 9, 28, 15, 30, 0)
    resolution = TimexResolver.resolve(["XXXX-WXX-3T04", "XXXX-WXX-3T16"], today)
    expected = [
        ("XXXX-WXX-3T04", "2017-09-27 04:00:00"),
        ("XXXX-WXX-3T04", "2017-10-04 04:00:00"),
        ("XXXX-WXX-3T16", "2017-09-27 16:00:00"),
        ("XXXX-WXX-3T16", "2017-10-04 16:00:00"),
    ]
    assert len(resolution.values) == len(expected)
    for entry, (timex, value) in zip(resolution.values, expected):
        assert entry.timex == timex
        assert entry.type == "datetime"
        assert entry.value == value
        assert entry.start is None
        assert entry.end is None
def test_datatypes_resolver_datetime_wednesday_4_am():
    """A fully specified date+hour timex resolves to a single 'datetime' value."""
    today = datetime(2017, 9, 7)
    resolution = TimexResolver.resolve(["2017-10-11T04"], today)
    assert len(resolution.values) == 1
    entry = resolution.values[0]
    assert entry.timex == "2017-10-11T04"
    assert entry.type == "datetime"
    assert entry.value == "2017-10-11 04:00:00"
    assert entry.start is None
    assert entry.end is None
def _assert_single_duration(timex, expected_seconds, today=None):
    """Resolve *timex* and expect exactly one 'duration' value of *expected_seconds*."""
    if today is not None:
        resolution = TimexResolver.resolve([timex], today)
    else:
        resolution = TimexResolver.resolve([timex])
    assert len(resolution.values) == 1
    entry = resolution.values[0]
    assert entry.timex == timex
    assert entry.type == "duration"
    assert entry.value == expected_seconds
    assert entry.start is None
    assert entry.end is None


def test_datatypes_resolver_duration_2years():
    """P2Y resolves to two years expressed in seconds."""
    _assert_single_duration("P2Y", "63072000", today=datetime(2017, 9, 7))


def test_datatypes_resolver_duration_6month():
    """P6M resolves to six months expressed in seconds."""
    _assert_single_duration("P6M", "15552000", today=datetime(2017, 9, 7))


def test_datatypes_resolver_duration_3weeks():
    """P3W resolves to three weeks expressed in seconds."""
    _assert_single_duration("P3W", "1814400")


def test_datatypes_resolver_duration_5days():
    """P5D resolves to five days expressed in seconds."""
    _assert_single_duration("P5D", "432000")


def test_datatypes_resolver_duration_8hours():
    """PT8H resolves to eight hours expressed in seconds."""
    _assert_single_duration("PT8H", "28800")


def test_datatypes_resolver_duration_15minutes():
    """PT15M resolves to fifteen minutes expressed in seconds."""
    _assert_single_duration("PT15M", "900")


def test_datatypes_resolver_duration_10seconds():
    """PT10S resolves to ten seconds."""
    _assert_single_duration("PT10S", "10")
def test_datatypes_resolver_dateRange_september():
    """A month-only timex resolves to last year's and this year's September ranges."""
    today = datetime(2017, 9, 28)
    resolution = TimexResolver.resolve(["XXXX-09"], today)
    expected_ranges = [("2016-09-01", "2016-10-01"), ("2017-09-01", "2017-10-01")]
    assert len(resolution.values) == len(expected_ranges)
    for entry, (start, end) in zip(resolution.values, expected_ranges):
        assert entry.timex == "XXXX-09"
        assert entry.type == "daterange"
        assert entry.start == start
        assert entry.end == end
        assert entry.value is None
def test_datatypes_resolver_dateRange_winter():
    """A season timex cannot be grounded to concrete dates and is reported unresolved."""
    resolution = TimexResolver.resolve(["WI"])
    assert len(resolution.values) == 1
    entry = resolution.values[0]
    assert entry.timex == "WI"
    assert entry.type == "daterange"
    assert entry.value == "not resolved"
    assert entry.start is None
    assert entry.end is None
def _assert_single_daterange(timex, start, end, today):
    """Resolve *timex* and expect exactly one 'daterange' spanning [start, end)."""
    resolution = TimexResolver.resolve([timex], today)
    assert len(resolution.values) == 1
    entry = resolution.values[0]
    assert entry.timex == timex
    assert entry.type == "daterange"
    assert entry.start == start
    assert entry.end == end


def test_datatypes_resolver_dateRange_last_week():
    """An ISO week timex resolves to that week's Monday-to-Monday range."""
    _assert_single_daterange("2019-W17", "2019-04-22", "2019-04-29", datetime(2017, 4, 30))


def test_datatypes_resolver_dateRange_last_month():
    """A year-month timex resolves to the full calendar month."""
    _assert_single_daterange("2019-03", "2019-03-01", "2019-04-01", datetime(2017, 4, 30))


def test_datatypes_resolver_dateRange_last_year():
    """A year timex resolves to the full calendar year."""
    _assert_single_daterange("2018", "2018-01-01", "2019-01-01", datetime(2017, 4, 30))


def test_datatypes_resolver_dateRange_last_three_weeks():
    """An explicit (start,end,duration) timex resolves to its own bounds."""
    _assert_single_daterange(
        "(2019-04-10,2019-05-01,P3W)", "2019-04-10", "2019-05-01", datetime(2017, 4, 30)
    )


def test_datatypes_resolver_dateRange_last_ten_years():
    """A decade-long (start,end,duration) timex resolves to its own bounds."""
    _assert_single_daterange(
        "(2011-01-01,2021-01-01,P10Y)", "2011-01-01", "2021-01-01", datetime(2021, 1, 1)
    )
def _assert_single_timerange(timex, start, end):
    """Resolve *timex* against the current time and expect one 'timerange' value."""
    resolution = TimexResolver.resolve([timex], datetime.now())
    assert len(resolution.values) == 1
    entry = resolution.values[0]
    assert entry.timex == timex
    assert entry.type == "timerange"
    assert entry.start == start
    assert entry.end == end
    assert entry.value is None


def test_datatypes_resolver_timeRange_4am_to_8pm():
    """An explicit (start,end,duration) time timex resolves to its own bounds."""
    _assert_single_timerange("(T04,T20,PT16H)", "04:00:00", "20:00:00")


def test_datatypes_resolver_timeRange_morning():
    """The 'morning' part-of-day timex maps to 08:00-12:00."""
    _assert_single_timerange("TMO", "08:00:00", "12:00:00")


def test_datatypes_resolver_timeRange_afternoon():
    """The 'afternoon' part-of-day timex maps to 12:00-16:00."""
    _assert_single_timerange("TAF", "12:00:00", "16:00:00")


def test_datatypes_resolver_timeRange_evening():
    """The 'evening' part-of-day timex maps to 16:00-20:00."""
    _assert_single_timerange("TEV", "16:00:00", "20:00:00")
def _assert_single_datetimerange(timex, start, end):
    """Resolve *timex* against the current time and expect one 'datetimerange' value."""
    resolution = TimexResolver.resolve([timex], datetime.now())
    assert len(resolution.values) == 1
    entry = resolution.values[0]
    assert entry.timex == timex
    assert entry.type == "datetimerange"
    assert entry.start == start
    assert entry.end == end
    assert entry.value is None


def test_datatypes_resolver_dateTimeRange_this_morning():
    """A date plus 'morning' resolves to that date's 08:00-12:00 window."""
    _assert_single_datetimerange("2017-10-07TMO", "2017-10-07 08:00:00", "2017-10-07 12:00:00")


def test_datatypes_resolver_dateTimeRange_tonight():
    """A date plus 'night' resolves to that date's 20:00-24:00 window."""
    _assert_single_datetimerange("2018-03-18TNI", "2018-03-18 20:00:00", "2018-03-18 24:00:00")


def test_datatypes_resolver_dateTimeRange_next_monday_4am_to_next_thursday_3pm():
    """An explicit (start,end,duration) datetime timex resolves to its own bounds."""
    _assert_single_datetimerange(
        "(2017-10-09T04,2017-10-12T15,PT83H)", "2017-10-09 04:00:00", "2017-10-12 15:00:00"
    )
def _assert_time_values(timexes, expected_values):
    """Resolve *timexes* and expect one 'time' value per input, in order."""
    resolution = TimexResolver.resolve(timexes)
    assert len(resolution.values) == len(expected_values)
    for entry, timex, value in zip(resolution.values, timexes, expected_values):
        assert entry.timex == timex
        assert entry.type == "time"
        assert entry.value == value
        assert entry.start is None
        assert entry.end is None


def test_datatypes_resolver_time_4am():
    """A single hour timex resolves to that clock time."""
    _assert_time_values(["T04"], ["04:00:00"])


def test_datatypes_resolver_time_4_oclock():
    """Two hour timexes each resolve to their own clock time."""
    _assert_time_values(["T04", "T16"], ["04:00:00", "16:00:00"])
| 37.350685
| 86
| 0.700213
| 1,842
| 13,633
| 5.109663
| 0.074376
| 0.336592
| 0.397365
| 0.329898
| 0.896621
| 0.857629
| 0.797174
| 0.71717
| 0.667021
| 0.642159
| 0
| 0.088686
| 0.157192
| 13,633
| 364
| 87
| 37.453297
| 0.730461
| 0
| 0
| 0.454545
| 0
| 0
| 0.103426
| 0.013203
| 0
| 0
| 0
| 0
| 0.72
| 1
| 0.101818
| false
| 0
| 0.003636
| 0
| 0.105455
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e6fdb7dcb34b3cadf2720757de486e43f465bb23
| 12,263
|
py
|
Python
|
tests/test_apistar_jwt.py
|
audiolion/apistar-jwt
|
b3e0a9bd63a7729cab59450569e6f8021ceaba4f
|
[
"MIT"
] | 42
|
2017-10-05T00:58:10.000Z
|
2020-02-22T22:36:30.000Z
|
tests/test_apistar_jwt.py
|
audiolion/apistar-jwt
|
b3e0a9bd63a7729cab59450569e6f8021ceaba4f
|
[
"MIT"
] | 19
|
2017-10-06T18:31:42.000Z
|
2019-10-18T16:22:22.000Z
|
tests/test_apistar_jwt.py
|
audiolion/apistar-jwt
|
b3e0a9bd63a7729cab59450569e6f8021ceaba4f
|
[
"MIT"
] | 11
|
2017-10-06T20:02:20.000Z
|
2018-08-14T12:10:32.000Z
|
#!/usr/bin/env python
"""Tests for `apistar_jwt` package."""
import jwt
import pytest
from datetime import datetime, timedelta
from apistar import Route, exceptions, http
from apistar.test import TestClient
from apistar.server.app import App, ASyncApp
from apistar_jwt.token import JWT, JWTUser
from apistar_jwt.decorators import authentication_required, anonymous_allowed
@authentication_required
def auth_required(request: http.Request, user: JWTUser):
    """Echo the authenticated user's attributes; the decorator enforces a valid JWT."""
    return vars(user)
@anonymous_allowed
def anon_allowed(request: http.Request, user: JWTUser):
    """Echo the user's attributes when authenticated; otherwise return None."""
    return vars(user) if user else None
def test_configuration_error() -> None:
    """Constructing JWT with no settings must raise a ConfigurationError."""
    pytest.raises(exceptions.ConfigurationError, JWT)
@pytest.mark.parametrize('app_class', [App, ASyncApp])
def test_jwt_auth_required(app_class) -> None:
    """Requests without a valid Bearer JWT are rejected with 401; valid tokens echo the user."""
    secret = 'jwt-secret'
    app = app_class(
        routes=[Route('/auth-required', 'GET', auth_required)],
        components=[JWT({'JWT_SECRET': secret})],
    )
    client = TestClient(app)

    def get(headers=None):
        # All requests in this test hit the same protected route.
        return client.get('/auth-required', headers=headers)

    def token_for(claims, key=secret, algorithm='HS256'):
        return jwt.encode(claims, key, algorithm=algorithm).decode(encoding='UTF-8')

    # Missing header, bare scheme, and non-Bearer scheme all fail authentication.
    assert get().status_code == 401
    assert get({'Authorization': 'Bearer'}).status_code == 401
    assert get({'Authorization': 'Basic username'}).status_code == 401

    payload = {'id': 1, 'username': 'bailey'}
    response = get({'Authorization': 'Bearer ' + token_for(payload)})
    assert response.status_code == 200
    data = response.json()
    assert data['id'] == payload['id']
    assert data['username'] == payload['username']
    assert data['token'] == payload

    # wrong secret
    assert get({'Authorization': 'Bearer ' + token_for(payload, key='wrong secret')}).status_code == 401
    # wrong algorithm
    assert get({'Authorization': 'Bearer ' + token_for(payload, algorithm='HS512')}).status_code == 401
    # empty payload causes auth to fail
    assert get({'Authorization': 'Bearer ' + token_for({})}).status_code == 401
@pytest.mark.parametrize('app_class', [App, ASyncApp])
def test_jwt_anon_allowed(app_class) -> None:
    """Anonymous access is allowed, but any presented credential must still be valid."""
    secret = 'jwt-secret'
    app = app_class(
        routes=[Route('/anonymous-allowed', 'GET', anon_allowed)],
        components=[JWT({'JWT_SECRET': secret})],
    )
    client = TestClient(app)

    def get(headers=None):
        return client.get('/anonymous-allowed', headers=headers)

    def token_for(claims, key=secret, algorithm='HS256'):
        return jwt.encode(claims, key, algorithm=algorithm).decode(encoding='UTF-8')

    # No credentials at all: anonymous access succeeds with an empty body.
    response = get()
    assert response.status_code == 200
    assert response.json() is None
    # client is trying to authenticate, so not anonymous
    assert get({'Authorization': 'Bearer'}).status_code == 401
    # client is trying to authenticate, so not anonymous
    assert get({'Authorization': 'Basic username'}).status_code == 401

    payload = {'id': 1, 'username': 'bailey'}
    # authenticated is also allowed
    response = get({'Authorization': 'Bearer ' + token_for(payload)})
    assert response.status_code == 200
    data = response.json()
    assert data['id'] == payload['id']
    assert data['username'] == payload['username']
    assert data['token'] == payload

    # wrong secret
    assert get({'Authorization': 'Bearer ' + token_for(payload, key='wrong secret')}).status_code == 401
    # wrong algorithm
    assert get({'Authorization': 'Bearer ' + token_for(payload, algorithm='HS512')}).status_code == 401
    # empty payload causes auth to fail
    assert get({'Authorization': 'Bearer ' + token_for({})}).status_code == 401
@pytest.mark.parametrize('app_class', [App, ASyncApp])
def test_jwt_issuer_claim(app_class) -> None:
    """Tokens are accepted only when the 'iss' claim matches the configured issuer."""
    secret = 'jwt-secret'
    app = app_class(
        routes=[Route('/auth-required', 'GET', auth_required)],
        components=[
            JWT({
                'JWT_SECRET': secret,
                'JWT_OPTIONS': {
                    'issuer': 'urn:foo',
                }
            })
        ],
    )
    client = TestClient(app)

    def status_with(claims):
        # Encode the current claims and report the auth outcome.
        token = jwt.encode(claims, secret, algorithm='HS256').decode(encoding='UTF-8')
        response = client.get('/auth-required', headers={
            'Authorization': 'Bearer {token}'.format(token=token),
        })
        return response.status_code

    payload = {'user': 1, 'username': 'bailey', 'iss': 'urn:foo'}
    # iss claim is correct
    assert status_with(payload) == 200
    # iss claim is incorrect
    payload['iss'] = 'urn:not-foo'
    assert status_with(payload) == 401
    # no iss claim included in jwt
    del payload['iss']
    assert status_with(payload) == 401
@pytest.mark.parametrize('app_class', [App, ASyncApp])
def test_jwt_audience_claim(app_class) -> None:
    """Tokens are accepted only when 'aud' (string or list) contains the configured audience."""
    secret = 'jwt-secret'
    app = app_class(
        routes=[Route('/auth-required', 'GET', auth_required)],
        components=[
            JWT({
                'JWT_SECRET': secret,
                'JWT_OPTIONS': {
                    'audience': 'urn:foo',
                }
            })
        ],
    )
    client = TestClient(app)

    def status_with(claims):
        # Encode the current claims and report the auth outcome.
        token = jwt.encode(claims, secret, algorithm='HS256').decode(encoding='UTF-8')
        response = client.get('/auth-required', headers={
            'Authorization': 'Bearer {token}'.format(token=token),
        })
        return response.status_code

    payload = {'user': 1, 'username': 'bailey', 'aud': 'urn:foo'}
    # aud claim is single and correct
    assert status_with(payload) == 200
    # aud claim is multiple and correct
    payload['aud'] = ['urn:bar', 'urn:baz', 'urn:foo']
    assert status_with(payload) == 200
    # aud claim is incorrect and single
    payload['aud'] = 'urn:not-foo'
    assert status_with(payload) == 401
    # aud claim is incorrect and multiple
    payload['aud'] = ['urn:bar', 'urn:baz', 'urn:not-foo']
    assert status_with(payload) == 401
    # no aud claim included in jwt
    del payload['aud']
    assert status_with(payload) == 401
@pytest.mark.parametrize('app_class', [App, ASyncApp])
def test_jwt_leeway_claim(app_class) -> None:
    """An expired 'exp' claim is tolerated only within the configured leeway window."""
    secret = 'jwt-secret'
    app = app_class(
        routes=[Route('/auth-required', 'GET', auth_required)],
        components=[
            JWT({
                'JWT_SECRET': secret,
                'JWT_OPTIONS': {
                    'leeway': 3,
                }
            })
        ],
    )
    client = TestClient(app)

    def status_with(claims):
        # Encode the current claims and report the auth outcome.
        token = jwt.encode(claims, secret, algorithm='HS256').decode(encoding='UTF-8')
        response = client.get('/auth-required', headers={
            'Authorization': 'Bearer {token}'.format(token=token),
        })
        return response.status_code

    payload = {'user': 1, 'username': 'bailey', 'exp': datetime.utcnow() - timedelta(seconds=2)}
    # exp claim doesn't fail because of leeway
    assert status_with(payload) == 200
    # exp claim fails because leeway is only 3 seconds
    payload['exp'] = datetime.utcnow() - timedelta(seconds=4)
    assert status_with(payload) == 401
    # no exp claim included in jwt, leeway doesn't apply
    del payload['exp']
    assert status_with(payload) == 200
@pytest.mark.parametrize("app_class", [App, ASyncApp])
def test_jwt_white_list(app_class) -> None:
    """Handlers named in JWT_WHITE_LIST bypass the forced-authentication event hook."""
    secret = "jwt-secret"

    class IsAuthenticated:
        def on_request(self, jwt_user: JWTUser) -> None:
            """ just force authentication for each request"""

    def fetch_schema(settings):
        # Build a fresh app with the given JWT settings and request the schema route.
        app = app_class(routes=[], components=[JWT(settings)], event_hooks=[IsAuthenticated()])
        return TestClient(app).get("/schema/")

    r = fetch_schema({"JWT_SECRET": secret})
    assert r.json() == "Authorization header is missing."
    assert r.status_code == 401

    r = fetch_schema({"JWT_SECRET": secret, "JWT_WHITE_LIST": ["serve_schema"]})
    assert r.status_code == 200
@pytest.mark.parametrize("app_class", [App, ASyncApp])
def test_jwt_authorization_prefix(app_class) -> None:
    """A custom JWT_AUTHORIZATION_PREFIX replaces the default 'Bearer' scheme."""
    secret = 'jwt-secret'
    app = app_class(
        routes=[Route('/auth-required', 'GET', auth_required)],
        components=[JWT({'JWT_SECRET': secret, 'JWT_AUTHORIZATION_PREFIX': 'jwt'})],
    )
    client = TestClient(app)

    def get(headers=None):
        return client.get('/auth-required', headers=headers)

    # Missing header, bare scheme, and non-matching scheme all fail authentication.
    assert get().status_code == 401
    assert get({'Authorization': 'Bearer'}).status_code == 401
    assert get({'Authorization': 'Basic username'}).status_code == 401

    payload = {'id': 1, 'username': 'bailey'}
    encoded_jwt = jwt.encode(payload, secret, algorithm='HS256').decode(encoding='UTF-8')
    response = get({'Authorization': 'JWT {token}'.format(token=encoded_jwt)})
    assert response.status_code == 200
    data = response.json()
    assert data['id'] == payload['id']
    assert data['username'] == payload['username']
    assert data['token'] == payload
| 32.876676
| 97
| 0.650575
| 1,411
| 12,263
| 5.54146
| 0.09922
| 0.051157
| 0.063052
| 0.089014
| 0.857654
| 0.831948
| 0.817368
| 0.811357
| 0.802277
| 0.798695
| 0
| 0.018598
| 0.197586
| 12,263
| 372
| 98
| 32.965054
| 0.776016
| 0.059692
| 0
| 0.764045
| 0
| 0
| 0.183668
| 0.002087
| 0
| 0
| 0
| 0
| 0.157303
| 1
| 0.041199
| false
| 0
| 0.029963
| 0.003745
| 0.086142
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc0639ee89c5b13c3e036e5a682de6a10f70cf6c
| 4,447
|
py
|
Python
|
examples/dagster_examples/bay_bikes/pipelines.py
|
zzztimbo/dagster
|
5cf8f159183a80d2364e05bb30362e2798a7af37
|
[
"Apache-2.0"
] | null | null | null |
examples/dagster_examples/bay_bikes/pipelines.py
|
zzztimbo/dagster
|
5cf8f159183a80d2364e05bb30362e2798a7af37
|
[
"Apache-2.0"
] | null | null | null |
examples/dagster_examples/bay_bikes/pipelines.py
|
zzztimbo/dagster
|
5cf8f159183a80d2364e05bb30362e2798a7af37
|
[
"Apache-2.0"
] | null | null | null |
from dagster import ModeDefinition, PresetDefinition, file_relative_path, pipeline
from ..common.resources import postgres_db_info_resource
from .resources import credentials_vault, gcs_client, local_client, mount
from .solids import train_daily_bike_supply_model, trip_etl, weather_etl
# One execution mode per deployment target. Both share every resource except the
# GCS client, which is stubbed locally and real in production.
MODES = [
    ModeDefinition(
        name=mode_name,
        resource_defs={
            'postgres_db': postgres_db_info_resource,
            'gcs_client': gcs,
            'credentials_vault': credentials_vault,
            'volume': mount,
        },
        description=description,
    )
    for mode_name, gcs, description in [
        ('development', local_client, 'Mode to be used during local demo.'),
        ('production', gcs_client, 'Mode to be used on a remote production server'),
    ]
]
# Dev/prod presets for the weather ETL; only the credentials and database
# resource files differ between the two environments.
WEATHER_INGEST_PRESETS = [
    PresetDefinition.from_files(
        prefix + '_weather_etl',
        mode=mode,
        environment_files=[
            file_relative_path(__file__, 'environments/' + prefix + '_credentials_vault.yaml'),
            file_relative_path(__file__, 'environments/' + prefix + '_database_resources.yaml'),
            file_relative_path(__file__, 'environments/file_system_resources.yaml'),
            file_relative_path(__file__, 'environments/weather.yaml'),
        ],
        solid_subset=['weather_etl'],
    )
    for prefix, mode in (('dev', 'development'), ('prod', 'production'))
]
# Dev/prod presets for the trip ETL; only the credentials and database
# resource files differ between the two environments.
TRIP_INGEST_PRESETS = [
    PresetDefinition.from_files(
        prefix + '_trip_etl',
        mode=mode,
        environment_files=[
            file_relative_path(__file__, 'environments/' + prefix + '_credentials_vault.yaml'),
            file_relative_path(__file__, 'environments/' + prefix + '_database_resources.yaml'),
            file_relative_path(__file__, 'environments/file_system_resources.yaml'),
            file_relative_path(__file__, 'environments/trips.yaml'),
        ],
        solid_subset=['trip_etl'],
    )
    for prefix, mode in (('dev', 'development'), ('prod', 'production'))
]
# Dev/prod presets for model training; only the credentials and database
# resource files differ between the two environments.
TRAINING_PRESETS = [
    PresetDefinition.from_files(
        prefix + '_train_daily_bike_supply_model',
        mode=mode,
        environment_files=[
            file_relative_path(__file__, 'environments/' + prefix + '_credentials_vault.yaml'),
            file_relative_path(__file__, 'environments/' + prefix + '_database_resources.yaml'),
            file_relative_path(__file__, 'environments/file_system_resources.yaml'),
            file_relative_path(__file__, 'environments/training.yaml'),
        ],
        solid_subset=['train_daily_bike_supply_model'],
    )
    for prefix, mode in (('dev', 'development'), ('prod', 'production'))
]
@pipeline(
    mode_defs=MODES, preset_defs=WEATHER_INGEST_PRESETS + TRIP_INGEST_PRESETS + TRAINING_PRESETS,
)
def generate_training_set_and_train_model():
    """Ingest weather and trip data, then fit the daily bike-supply model."""
    weather = weather_etl()
    trips = trip_etl()
    return train_daily_bike_supply_model(weather, trips)
@pipeline(
    mode_defs=MODES, preset_defs=WEATHER_INGEST_PRESETS,
)
def daily_weather_pipeline():
    """Run only the weather-ingestion step."""
    result = weather_etl()
    return result
| 37.686441
| 97
| 0.683607
| 455
| 4,447
| 6.043956
| 0.147253
| 0.109091
| 0.145455
| 0.174545
| 0.810909
| 0.758909
| 0.710909
| 0.710909
| 0.710909
| 0.673818
| 0
| 0
| 0.218125
| 4,447
| 117
| 98
| 38.008547
| 0.790912
| 0
| 0
| 0.685185
| 0
| 0
| 0.299528
| 0.221947
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0.037037
| 0.018519
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc6bc06482f5835599323193d8e8f016bdd2818c
| 5,284
|
py
|
Python
|
tests/integration/testdata/start_api/main.py
|
viksrivat/aws-sam-cli
|
079e7a927ee20b80903852f6da8029d4086c889b
|
[
"Apache-2.0"
] | 1
|
2020-12-19T07:34:23.000Z
|
2020-12-19T07:34:23.000Z
|
tests/integration/testdata/start_api/main.py
|
viksrivat/aws-sam-cli
|
079e7a927ee20b80903852f6da8029d4086c889b
|
[
"Apache-2.0"
] | 1
|
2019-08-29T00:20:27.000Z
|
2019-08-29T00:20:27.000Z
|
tests/integration/testdata/start_api/main.py
|
viksrivat/aws-sam-cli
|
079e7a927ee20b80903852f6da8029d4086c889b
|
[
"Apache-2.0"
] | 2
|
2019-08-28T22:24:09.000Z
|
2019-08-28T23:07:00.000Z
|
import json
import sys
import time
def handler(event, context):
    """Return a canned 200 response with a JSON greeting body."""
    payload = json.dumps({"hello": "world"})
    return {"statusCode": 200, "body": payload}
def echo_event_handler(event, context):
    """Echo the full Lambda event back as the JSON response body."""
    serialized = json.dumps(event)
    return {"statusCode": 200, "body": serialized}
def echo_event_handler_2(event, context):
    """Tag the event with this handler's name, then echo it back."""
    event['handler'] = 'echo_event_handler_2'
    response = {"statusCode": 200}
    response["body"] = json.dumps(event)
    return response
def echo_integer_body(event, context):
    """Return a non-string (integer) body to exercise response validation."""
    response = {"statusCode": 200}
    response["body"] = 42
    return response
def content_type_setter_handler(event, context):
    """Return a plain-text body with an explicit Content-Type header."""
    headers = {"Content-Type": "text/plain"}
    return {"statusCode": 200, "body": "hello", "headers": headers}
def only_set_status_code_handler(event, context):
    """Respond with a status code and nothing else."""
    return dict(statusCode=200)
def only_set_body_handler(event, context):
    """Respond with only a body, leaving the status code unset."""
    return dict(body=json.dumps({"hello": "world"}))
def string_status_code_handler(event, context):
    """Return the status code as a string rather than an integer."""
    body = json.dumps({"hello": "world"})
    return {"statusCode": "200", "body": body}
def sleep_10_sec_handler(event, context):
    """Block for 10 seconds before responding; used to exercise concurrent requests."""
    # sleep thread for 10s. This is useful for testing multiple requests
    time.sleep(10)
    return {"statusCode": 200, "body": json.dumps({"message": "HelloWorld! I just slept and waking up."})}
def write_to_stderr(event, context):
    """Emit a diagnostic line on stderr, then return the standard greeting."""
    sys.stderr.write("Docker Lambda is writing to stderr")
    payload = json.dumps({"hello": "world"})
    return {"statusCode": 200, "body": payload}
def write_to_stdout(event, context):
    """Emit a diagnostic line on stdout, then return the standard greeting."""
    sys.stdout.write("Docker Lambda is writing to stdout")
    payload = json.dumps({"hello": "world"})
    return {"statusCode": 200, "body": payload}
def invalid_response_returned(event, context):
    """Return a bare string instead of a response dict (invalid on purpose)."""
    result = "This is invalid"
    return result
def invalid_hash_response(event, context):
    """Return a dict missing every expected response key (invalid on purpose)."""
    return dict(foo="bar")
def base64_response(event, context):
gifImageBase64 = "R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFV
yovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==" # NOQA
return {
"statusCode": 200,
"body": gifImageBase64,
"isBase64Encoded": True,
"headers": {
"Content-Type": "image/gif"
}
}
def echo_base64_event_body(event, context):
    """Echo a base64 request body back unchanged, preserving its content type."""
    content_type = event["headers"]["Content-Type"]
    response = {
        "statusCode": 200,
        "body": event["body"],
        "headers": {"Content-Type": content_type},
    }
    response["isBase64Encoded"] = event["isBase64Encoded"]
    return response
def multiple_headers(event, context):
    """Return both single-value and multi-value headers."""
    response = {"statusCode": 200, "body": "hello"}
    response["headers"] = {"Content-Type": "text/plain"}
    response["multiValueHeaders"] = {"MyCustomHeader": ['Value1', 'Value2']}
    return response
def multiple_headers_overrides_headers(event, context):
    """Return overlapping single- and multi-value forms of the same header."""
    single = {"Content-Type": "text/plain", "MyCustomHeader": 'Custom'}
    multi = {"MyCustomHeader": ['Value1', 'Value2']}
    return {
        "statusCode": 200,
        "body": "hello",
        "headers": single,
        "multiValueHeaders": multi,
    }
| 49.383178
| 2,635
| 0.799205
| 379
| 5,284
| 11.031662
| 0.403694
| 0.048792
| 0.063621
| 0.071514
| 0.190624
| 0.190624
| 0.163358
| 0.143028
| 0.129156
| 0.082038
| 0
| 0.074784
| 0.101628
| 5,284
| 106
| 2,636
| 49.849057
| 0.805983
| 0.013437
| 0
| 0.287879
| 0
| 0.015152
| 0.640691
| 0.499808
| 0
| 1
| 0
| 0
| 0
| 1
| 0.257576
| false
| 0
| 0.045455
| 0.181818
| 0.560606
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
5daebba1b164f94f42f93b9b11635a9961260c88
| 43,931
|
py
|
Python
|
data_utils/datasets.py
|
initc/gpt-lm
|
941f2816d7a749ea3a3e0c574b35fc3fc67e94e3
|
[
"Apache-2.0"
] | 2
|
2019-10-17T04:03:06.000Z
|
2020-05-22T13:27:50.000Z
|
data_utils/datasets.py
|
initc/gpt-lm
|
941f2816d7a749ea3a3e0c574b35fc3fc67e94e3
|
[
"Apache-2.0"
] | 1
|
2020-05-22T13:36:27.000Z
|
2020-05-22T13:36:27.000Z
|
data_utils/datasets.py
|
initc/gpt-lm
|
941f2816d7a749ea3a3e0c574b35fc3fc67e94e3
|
[
"Apache-2.0"
] | 1
|
2019-10-17T04:03:08.000Z
|
2019-10-17T04:03:08.000Z
|
# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dataset objects for jsons, csvs, and BERT datasets"""
import os
import torch
import random
from data_utils.utils import numpy_seed
import itertools
from torch.utils import data
import numpy as np
from data_utils import indexed_dataset
class CLMTaskDataset(data.Dataset):
    """Causal-LM dataset whose records carry a task id as their first token.

    Samples are length-sorted, filtered to ``10 <= size <= max_lens``,
    grouped into token-budget batches, and the batch list is padded so it
    divides evenly across ``world_size`` workers.
    """

    def __init__(self, path, tokenizer, batch_size, max_tokens, world_size=1, max_lens=510, seed=512, no_cache=False, use_cls_special=False):
        self.sizes = None  # per-sample token counts; set by read_data()
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_tokens = max_tokens
        self.world_size = world_size
        self.max_lens = max_lens
        # Offset so shuffling differs from sibling datasets built with the
        # same base seed.
        self.seed = seed + 20
        self.no_cache = no_cache
        self.use_cls_special = use_cls_special
        self.data = self.read_data(path)
        self.padding = tokenizer.pad()
        self.frozen_indices = self.init_data_indices()
        self.frozen_batch = self.fill_batch(list(self.batch_by_size()))

    @property
    def supports_prefetch(self):
        """Whether the underlying indexed dataset can be preloaded."""
        return bool(self.data.supports_prefetch)

    def prefetch(self, indices):
        """Load the given sample indices into memory when supported."""
        if self.supports_prefetch:
            self.data.prefetch(indices)
            print("| Fetch all data into memory")

    def fill_batch(self, batches):
        """Shuffle *batches* and pad the list to a multiple of world_size."""
        size = len(batches)
        rng = random.Random(self.seed)
        rng.shuffle(batches)
        fill_num = self.world_size - size % self.world_size
        if fill_num == self.world_size:
            return batches
        # Repeat trailing batches so every worker receives the same count.
        batches.extend(batches[-fill_num:])
        assert len(batches) % self.world_size == 0
        return batches

    def index_len(self):
        """Number of raw samples before filtering."""
        return len(self.data)

    def read_data(self, path):
        """Open the indexed dataset at *path* and record its size table."""
        if self.no_cache:
            data = indexed_dataset.IndexedDataset(path, fix_lua_indexing=True)
        else:
            data = indexed_dataset.IndexedCachedDataset(path, fix_lua_indexing=True)
        self.sizes = data.sizes
        return data

    def init_data_indices(self):
        return self.ordered_indices()

    def filter_by_size(self, indices):
        """Yield indices whose size is in [10, max_lens]; report the rest."""
        ignore = []
        too_small = []
        for ind in indices:
            size = self.num_tokens(ind)
            if size > self.max_lens:
                ignore.append(ind)
            elif size < 10:
                too_small.append(ind)
            else:
                yield ind
        if len(ignore) > 0:
            print(
                "WARNING: {} samples have invalid sizes and will be skipped, max-qa-len={}, first few sample ids={}".format(
                    len(ignore), self.max_lens, ignore[:10]))
        if len(too_small) > 0:
            # This branch reports the minimum-length filter; the original
            # message said "max-qa-len" here, which contradicted the
            # identical branch in CLMDataset ("min-qa-len").
            print(
                "WARNING: {} samples have invalid sizes and will be skipped, min-qa-len={}, first few sample ids={}".format(
                    len(too_small), 10, too_small[:10]))

    def __len__(self):
        return len(self.frozen_indices)

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        with numpy_seed(self.seed):
            indices = np.random.permutation(len(self.data))
            lens = [self.num_tokens(inx_a) for inx_a in indices]
            # Stable mergesort keeps the random permutation order among
            # samples of equal length.
            indices = indices[np.argsort(np.array(lens), kind='mergesort')]
        return list(self.filter_by_size(indices))

    def num_tokens(self, idx):
        """Token count of sample *idx*."""
        return self.sizes[idx]

    def batch_by_size(self):
        """Yield batches of indices bounded by max_tokens and batch_size."""
        max_tokens = self.max_tokens
        indices = self.frozen_indices
        batch = []
        tokens = 0

        def is_batch_full(num_tokens):
            # Full when padding every member to the longest sample would
            # exceed the token budget, or the batch hits batch_size.
            if len(batch) == 0:
                return False
            if num_tokens > max_tokens:
                return True
            if len(batch) == self.batch_size:
                return True
            return False

        for idx in indices:
            idx_max_token = self.num_tokens(idx)
            tokens = max(tokens, idx_max_token)
            if is_batch_full(tokens * (len(batch) + 1)):
                yield batch
                batch = []
                # NOTE(review): tokens resets to 0 although idx is carried
                # into the new batch, so its length is not counted toward
                # the new batch's budget until a longer sample arrives —
                # confirm this is intended.
                tokens = 0
            batch.append(idx)
        if len(batch) >= 1:
            yield batch

    def __getitem__(self, idx):
        """Build input tokens and shifted LM labels for sample *idx*."""
        tokens = self.data[idx].tolist()
        task = tokens[0]  # first stored token is the task id
        tokens = tokens[1:]
        token_sep = [self.tokenizer.sep()]
        if self.use_cls_special:
            token_cls = [self.tokenizer.cls_style(task)]
        else:
            token_cls = [self.tokenizer.cls()]
        lm_labels = tokens + token_sep
        tokens = token_cls + tokens
        assert len(tokens) == len(lm_labels)
        return {"id": int(idx),
                "tokens": tokens,
                "lm_labels": lm_labels,
                "task": task,
                "model-type": "CLM"
                }

    def collate(self, samples):
        """Pad a list of __getitem__ samples into one training batch dict."""
        padding = self.tokenizer.pad()
        tensor_size = max(len(candidate["tokens"]) for candidate in samples)
        batch = len(samples)
        input_ids = torch.LongTensor(batch, tensor_size).fill_(padding)
        attention_mask = torch.LongTensor(batch, tensor_size).fill_(0)
        lm_labels = torch.LongTensor(batch, tensor_size).fill_(-1)
        nsp_labels = torch.LongTensor(batch).fill_(-1)
        task_type_ids = None
        if not self.use_cls_special:
            task_type_ids = torch.LongTensor(batch, tensor_size).fill_(padding)
        id_ = torch.LongTensor(batch).fill_(padding)
        ntokens = 0
        for i, candidates in enumerate(samples):
            assert candidates["model-type"].lower() == "clm"
            tokens = candidates["tokens"]
            lm_tokens = candidates["lm_labels"]
            lens = len(tokens)
            input_ids[i, :lens] = torch.Tensor(tokens)
            lm_labels[i, :lens] = torch.Tensor(lm_tokens)
            if not self.use_cls_special:
                task_type_ids[i] = candidates["task"]
            attention_mask[i, :lens] = 1
            ntokens += lens
            # The original assigned id_[i] twice in a row; once suffices.
            id_[i] = candidates["id"]
        batch = {
            'id': id_,
            'nsentences': batch,
            "ntokens": ntokens,
            'net_input': {
                'input_tokens': input_ids,
                'task_type_ids': task_type_ids,
                'attention_mask': attention_mask,
                'clm': True
            },
            'target': lm_labels,
            'nsp_labels': nsp_labels
        }
        return batch
class MLMDataset(data.Dataset):
    """Masked-LM dataset over a single indexed token stream.

    Samples are length-sorted, filtered to ``size <= max_lens``, grouped
    into token-budget batches, and the batch list is padded to a multiple
    of ``world_size``.  Masking follows the BERT recipe: 80% [MASK],
    10% keep the original token, 10% random vocabulary token.
    """

    def __init__(self, path, tokenizer, batch_size, max_tokens, world_size=1, max_lens=510, seed=512, mask_lm_prob=0.15, max_preds_per_seq=80, no_cache=False, drop_first_token=False, use_task_embed=False):
        # a_size/b_size are unused in this single-stream class; kept for
        # attribute parity with the paired-dataset variants.
        self.a_size = None
        self.b_size = None
        self.tokenizer = tokenizer
        self.vocab_words = list(self.tokenizer.text_token_vocab.values())
        self.world_size = world_size
        self.mask_lm_prob = mask_lm_prob
        self.max_preds_per_seq = max_preds_per_seq
        self.batch_size = batch_size
        self.max_tokens = max_tokens
        self.max_lens = max_lens
        self.no_cache = no_cache
        self.drop_token = drop_first_token
        self.use_task_embed = use_task_embed
        self.seed = seed
        self.data = self.read_data(path)
        self.sizes = self.data.sizes
        self.padding = tokenizer.pad()
        self.frozen_indices = self.init_data_indices()
        self.frozen_batch = self.fill_batch(list(self.batch_by_size()))

    @property
    def supports_prefetch(self):
        """Whether the underlying indexed dataset can be preloaded."""
        return bool(self.data.supports_prefetch)

    def prefetch(self, indices):
        """Load the given sample indices into memory when supported."""
        if self.supports_prefetch:
            self.data.prefetch(indices)
            print("| Fetch all data into memory")

    def fill_batch(self, batches):
        """Shuffle *batches* and pad the list to a multiple of world_size."""
        b_size = len(batches)
        rng = random.Random(self.seed)
        rng.shuffle(batches)
        fill_num = self.world_size - b_size % self.world_size
        if fill_num == self.world_size:
            return batches
        # Repeat trailing batches so every worker receives the same count.
        batches.extend(batches[-fill_num:])
        assert len(batches) % self.world_size == 0
        return batches

    def index_len(self):
        """Number of raw samples before filtering."""
        return len(self.sizes)

    def read_data(self, path):
        """Open the indexed dataset at *path*."""
        if self.no_cache:
            data = indexed_dataset.IndexedDataset(path, fix_lua_indexing=True)
        else:
            data = indexed_dataset.IndexedCachedDataset(path, fix_lua_indexing=True)
        return data

    def init_data_indices(self):
        return self.ordered_indices()

    def filter_by_size(self, indices):
        """Yield indices whose size is within max_lens; report skipped ones."""
        # The original also tracked an `error` list that was never appended
        # to, making its warning branch unreachable; removed as dead code.
        ignore = []
        for ind in indices:
            size = self.num_tokens(ind)
            if size > self.max_lens:
                ignore.append(ind)
            else:
                yield ind
        if len(ignore) > 0:
            print(
                "WARNING: {} samples have invalid sizes and will be skipped, max-qa-len={}, first few sample ids={}".format(
                    len(ignore), self.max_lens, ignore[:10]))

    def __len__(self):
        return len(self.frozen_indices)

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        with numpy_seed(self.seed):
            indices = np.random.permutation(len(self.data))
            lens = [self.num_tokens(inx_a) for inx_a in indices]
            # Stable mergesort keeps the random permutation order among
            # samples of equal length.
            indices = indices[np.argsort(np.array(lens), kind='mergesort')]
        return list(self.filter_by_size(indices))

    def num_tokens(self, idx):
        """Token count of sample *idx*."""
        return self.sizes[idx]

    def batch_by_size(self):
        """Yield batches of indices bounded only by the max_tokens budget."""
        max_tokens = self.max_tokens
        indices = self.frozen_indices
        batch = []
        tokens = 0

        def is_batch_full(num_tokens):
            # Unlike CLMTaskDataset, batch_size is deliberately not
            # enforced here — only the token budget.
            if len(batch) == 0:
                return False
            if num_tokens > max_tokens:
                return True
            return False

        for idx in indices:
            idx_max_token = self.num_tokens(idx)
            tokens = max(tokens, idx_max_token)
            if is_batch_full(tokens * (len(batch) + 1)):
                yield batch
                batch = []
                # NOTE(review): tokens resets although idx is carried into
                # the new batch — confirm intended (same as sibling classes).
                tokens = 0
            batch.append(idx)
        if len(batch) >= 1:
            yield batch

    def __getitem__(self, idx):
        """Apply BERT-style masking to sample *idx* (deterministic per idx)."""
        rng = random.Random(int(idx))
        tokens = self.data[idx].tolist()
        if self.use_task_embed:
            # The task id is consumed below, so it must not also be dropped.
            assert not self.drop_token
        task_type = None
        if self.use_task_embed:
            task_type = tokens[0]
            tokens = tokens[1:]
        if self.drop_token:
            tokens = tokens[1:]
        tokens, mask, mask_labels = self.create_masked_lm_predictions(tokens, self.mask_lm_prob,
                                                                      self.max_preds_per_seq,
                                                                      self.vocab_words, rng)
        assert len(tokens) == len(mask_labels)
        return {"id": int(idx),
                "tokens": tokens,
                "lm_labels": mask_labels,
                "ntokens": sum(mask),
                "task_type": task_type,
                "model-type": "MLM"
                }

    def create_masked_lm_predictions(self, tokens, mask_lm_prob, max_preds_per_seq, vocab_words, rng):
        """Wrap *tokens* with [CLS]/[SEP] and mask a random subset.

        Returns (tokens, mask, mask_labels) where mask_labels holds the
        original token at masked positions and -1 elsewhere.
        """
        token_sep = [self.tokenizer.sep()]
        token_cls = [self.tokenizer.cls()]
        tokens = token_cls + tokens + token_sep
        # Only interior positions are candidates — never [CLS] or [SEP].
        cand_indices = [idx for idx in range(1, len(tokens) - 1)]
        rng.shuffle(cand_indices)
        num_to_predict = min(max_preds_per_seq, max(1, int(round(len(tokens) * mask_lm_prob))))
        mask = [0] * len(tokens)
        mask_labels = [-1] * len(tokens)
        for idx in cand_indices[:num_to_predict]:
            mask[idx] = 1
            label = self.mask_token(idx, tokens, vocab_words, rng)
            mask_labels[idx] = label
        return tokens, mask, mask_labels

    def mask_token(self, idx, tokens, vocab_words, rng):
        """Replace tokens[idx] in place (80% [MASK] / 10% keep / 10% random).

        Returns the original token so it can be used as the label.
        """
        label = tokens[idx]
        if rng.random() < 0.8:
            new_label = self.tokenizer.mask()
        else:
            if rng.random() < 0.5:
                new_label = label
            else:
                new_label = rng.choice(vocab_words)
        tokens[idx] = new_label
        return label

    def collate(self, samples):
        """Pad a list of __getitem__ samples into one training batch dict."""
        padding = self.tokenizer.pad()
        tensor_size = max(len(candidate["tokens"]) for candidate in samples)
        batch = len(samples)
        input_ids = torch.LongTensor(batch, tensor_size).fill_(padding)
        task_type_ids = None
        if self.use_task_embed:
            task_type_ids = torch.LongTensor(batch, tensor_size).fill_(padding)
        attention_mask = torch.LongTensor(batch, tensor_size).fill_(0)
        lm_labels = torch.LongTensor(batch, tensor_size).fill_(-1)
        nsp_labels = torch.LongTensor(batch).fill_(-1)
        id_ = torch.LongTensor(batch).fill_(padding)
        ntokens = 0
        for i, candidates in enumerate(samples):
            assert candidates["model-type"].lower() == "mlm"
            tokens = candidates["tokens"]
            task_type = candidates["task_type"]
            lm_tokens = candidates["lm_labels"]
            lens = len(tokens)
            input_ids[i, :lens] = torch.Tensor(tokens)
            if self.use_task_embed:
                task_type_ids[i] = task_type
            lm_labels[i, :lens] = torch.Tensor(lm_tokens)
            attention_mask[i, :lens] = 1
            ntokens += candidates['ntokens']
            id_[i] = candidates["id"]
        batch = {
            'id': id_,
            'nsentences': batch,
            "ntokens": ntokens,
            'net_input': {
                'input_tokens': input_ids,
                'task_type_ids': task_type_ids,
                'attention_mask': attention_mask,
                'clm': False
            },
            'target': lm_labels,
            'nsp_labels': nsp_labels
        }
        return batch
class CLMDataset(data.Dataset):
    """Causal-LM dataset over a single indexed token stream.

    Samples are length-sorted, filtered to ``10 <= size <= max_lens``,
    grouped into token-budget batches, and the batch list is padded to a
    multiple of ``world_size``.
    """

    def __init__(self, path, tokenizer, batch_size, max_tokens, world_size=1, max_lens=510, seed=512, no_cache=False,
                 drop_first_token=False):
        self.sizes = None  # per-sample token counts; set by read_data()
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_tokens = max_tokens
        self.world_size = world_size
        self.max_lens = max_lens
        # Offset so shuffling differs from sibling datasets built with the
        # same base seed.
        self.seed = seed + 20
        self.no_cache = no_cache
        self.drop_token = drop_first_token
        self.data = self.read_data(path)
        self.padding = tokenizer.pad()
        self.frozen_indices = self.init_data_indices()
        self.frozen_batch = self.fill_batch(list(self.batch_by_size()))

    @property
    def supports_prefetch(self):
        """Whether the underlying indexed dataset can be preloaded."""
        return bool(self.data.supports_prefetch)

    def prefetch(self, indices):
        """Load the given sample indices into memory when supported."""
        if self.supports_prefetch:
            self.data.prefetch(indices)
            print("| Fetch all data into memory")

    def fill_batch(self, batches):
        """Shuffle *batches* and pad the list to a multiple of world_size."""
        size = len(batches)
        rng = random.Random(self.seed)
        rng.shuffle(batches)
        fill_num = self.world_size - size % self.world_size
        if fill_num == self.world_size:
            return batches
        # Repeat trailing batches so every worker receives the same count.
        batches.extend(batches[-fill_num:])
        assert len(batches) % self.world_size == 0
        return batches

    def index_len(self):
        """Number of raw samples before filtering."""
        return len(self.data)

    def read_data(self, path):
        """Open the indexed dataset at *path* and record its size table."""
        if self.no_cache:
            data = indexed_dataset.IndexedDataset(path, fix_lua_indexing=True)
        else:
            data = indexed_dataset.IndexedCachedDataset(path, fix_lua_indexing=True)
        self.sizes = data.sizes
        return data

    def init_data_indices(self):
        return self.ordered_indices()

    def filter_by_size(self, indices):
        """Yield indices whose size is in [10, max_lens]; report the rest."""
        ignore = []
        too_small = []
        for ind in indices:
            size = self.num_tokens(ind)
            if size > self.max_lens:
                ignore.append(ind)
            elif size < 10:
                too_small.append(ind)
            else:
                yield ind
        if len(ignore) > 0:
            print(
                "WARNING: {} samples have invalid sizes and will be skipped, max-qa-len={}, first few sample ids={}".format(
                    len(ignore), self.max_lens, ignore[:10]))
        if len(too_small) > 0:
            print(
                "WARNING: {} samples have invalid sizes and will be skipped, min-qa-len={}, first few sample ids={}".format(
                    len(too_small), 10, too_small[:10]))

    def __len__(self):
        return len(self.frozen_indices)

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        with numpy_seed(self.seed):
            indices = np.random.permutation(len(self.data))
            lens = [self.num_tokens(inx_a) for inx_a in indices]
            # Stable mergesort keeps the random permutation order among
            # samples of equal length.
            indices = indices[np.argsort(np.array(lens), kind='mergesort')]
        return list(self.filter_by_size(indices))

    def num_tokens(self, idx):
        """Token count of sample *idx*."""
        return self.sizes[idx]

    def batch_by_size(self):
        """Yield batches of indices bounded only by the max_tokens budget."""
        max_tokens = self.max_tokens
        indices = self.frozen_indices
        batch = []
        tokens = 0

        def is_batch_full(num_tokens):
            # batch_size is deliberately not enforced here — only the
            # token budget (matches MLMDataset/BertPairDataset).
            if len(batch) == 0:
                return False
            if num_tokens > max_tokens:
                return True
            return False

        for idx in indices:
            idx_max_token = self.num_tokens(idx)
            tokens = max(tokens, idx_max_token)
            if is_batch_full(tokens * (len(batch) + 1)):
                yield batch
                batch = []
                # NOTE(review): tokens resets although idx is carried into
                # the new batch — confirm intended (same as sibling classes).
                tokens = 0
            batch.append(idx)
        if len(batch) >= 1:
            yield batch

    def __getitem__(self, idx):
        """Build input tokens and shifted LM labels for sample *idx*."""
        tokens = self.data[idx].tolist()
        if self.drop_token:
            tokens = tokens[1:]
        token_sep = [self.tokenizer.sep()]
        token_cls = [self.tokenizer.cls()]
        lm_labels = tokens + token_sep
        tokens = token_cls + tokens
        assert len(tokens) == len(lm_labels)
        return {"id": int(idx),
                "tokens": tokens,
                "lm_labels": lm_labels,
                "model-type": "CLM"
                }

    def collate(self, samples):
        """Pad a list of __getitem__ samples into one training batch dict."""
        padding = self.tokenizer.pad()
        tensor_size = max(len(candidate["tokens"]) for candidate in samples)
        batch = len(samples)
        input_ids = torch.LongTensor(batch, tensor_size).fill_(padding)
        attention_mask = torch.LongTensor(batch, tensor_size).fill_(0)
        lm_labels = torch.LongTensor(batch, tensor_size).fill_(-1)
        nsp_labels = torch.LongTensor(batch).fill_(-1)
        id_ = torch.LongTensor(batch).fill_(padding)
        ntokens = 0
        for i, candidates in enumerate(samples):
            assert candidates["model-type"].lower() == "clm"
            tokens = candidates["tokens"]
            lm_tokens = candidates["lm_labels"]
            lens = len(tokens)
            input_ids[i, :lens] = torch.Tensor(tokens)
            lm_labels[i, :lens] = torch.Tensor(lm_tokens)
            attention_mask[i, :lens] = 1
            ntokens += lens
            # The original assigned id_[i] twice in a row; once suffices.
            id_[i] = candidates["id"]
        batch = {
            'id': id_,
            'nsentences': batch,
            "ntokens": ntokens,
            'net_input': {
                'input_tokens': input_ids,
                'attention_mask': attention_mask,
                'clm': True
            },
            'target': lm_labels,
            'nsp_labels': nsp_labels
        }
        return batch
class BertPairDataset(data.Dataset):
    """Masked-LM dataset over paired indexed streams (``path-A``/``path-B``).

    Sequence A's first token encodes the pair/data type.  Pairs are
    length-sorted, filtered, grouped into token-budget batches, and the
    batch list is padded to a multiple of ``world_size``.  Masking follows
    the BERT recipe (80% [MASK], 10% keep, 10% random vocab token).
    """

    def __init__(self, path, tokenizer, batch_size, max_tokens, world_size=1, max_lens=510, seed=512, mask_lm_prob=0.15,
                 max_preds_per_seq=80, no_cache=False):
        self.a_size = None  # per-sample sizes of stream A; set by read_data()
        self.b_size = None  # per-sample sizes of stream B; set by read_data()
        self.tokenizer = tokenizer
        self.vocab_words = list(self.tokenizer.text_token_vocab.values())
        self.world_size = world_size
        self.mask_lm_prob = mask_lm_prob
        self.max_preds_per_seq = max_preds_per_seq
        self.batch_size = batch_size
        self.max_tokens = max_tokens
        self.max_lens = max_lens
        self.no_cache = no_cache
        self.seed = seed
        self.data = self.read_data(path)
        self.padding = tokenizer.pad()
        self.frozen_indices = self.init_data_indices()
        self.frozen_batch = self.fill_batch(list(self.batch_by_size()))

    @property
    def supports_prefetch(self):
        """Whether both underlying indexed datasets can be preloaded."""
        return bool(self.data[0].supports_prefetch and self.data[1].supports_prefetch)

    def prefetch(self, indices):
        """Load the given sample indices of both streams when supported."""
        if self.supports_prefetch:
            self.data[0].prefetch(indices)
            self.data[1].prefetch(indices)
            print("| Fetch all data into memory")

    def fill_batch(self, batches):
        """Shuffle *batches* and pad the list to a multiple of world_size."""
        b_size = len(batches)
        rng = random.Random(self.seed)
        rng.shuffle(batches)
        fill_num = self.world_size - b_size % self.world_size
        if fill_num == self.world_size:
            return batches
        # Repeat trailing batches so every worker receives the same count.
        batches.extend(batches[-fill_num:])
        assert len(batches) % self.world_size == 0
        return batches

    def index_len(self):
        """Number of raw samples before filtering."""
        return len(self.a_size)

    def read_data(self, path):
        """Open the ``-A`` and ``-B`` indexed datasets and record their sizes."""
        A_path = path + "-A"
        B_path = path + "-B"
        if self.no_cache:
            A_data = indexed_dataset.IndexedDataset(A_path, fix_lua_indexing=True)
            B_data = indexed_dataset.IndexedDataset(B_path, fix_lua_indexing=True)
        else:
            A_data = indexed_dataset.IndexedCachedDataset(A_path, fix_lua_indexing=True)
            B_data = indexed_dataset.IndexedCachedDataset(B_path, fix_lua_indexing=True)
        self.a_size = A_data.sizes
        self.b_size = B_data.sizes
        return (A_data, B_data)

    def init_data_indices(self):
        return self.ordered_indices()

    def filter_by_size(self, indices):
        """Yield valid pair indices; report oversized and degenerate pairs."""
        ignore = []
        error = []
        for ind in indices:
            size = self.num_tokens(ind)
            a_size = self.a_size[ind]
            b_size = self.b_size[ind]
            if size > self.max_lens:
                ignore.append(ind)
            elif a_size <= 1 or b_size < 1:
                # A must have at least one token besides its leading type id.
                error.append(ind)
            else:
                yield ind
        if len(ignore) > 0:
            print(
                "WARNING: {} samples have invalid sizes and will be skipped, max-qa-len={}, first few sample ids={}".format(
                    len(ignore), self.max_lens, ignore[:10]))
        if len(error) > 0:
            print(
                "WARNING: {} samples have too small size and will be skipped, first few sample ids={}".format(
                    len(error), error[:10]))

    def __len__(self):
        return len(self.frozen_indices)

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        with numpy_seed(self.seed):
            indices = np.random.permutation(len(self.data[0]))
            lens = [self.num_tokens(inx_a) for inx_a in indices]
            # Stable mergesort keeps the random permutation order among
            # samples of equal length.
            indices = indices[np.argsort(np.array(lens), kind='mergesort')]
        return list(self.filter_by_size(indices))

    def num_tokens(self, idx):
        """Combined token count of pair *idx* (A plus B)."""
        return self.a_size[idx] + self.b_size[idx]

    def batch_by_size(self):
        """Yield batches of indices bounded only by the max_tokens budget."""
        max_tokens = self.max_tokens
        indices = self.frozen_indices
        batch = []
        tokens = 0

        def is_batch_full(num_tokens):
            # batch_size is deliberately not enforced here — only the
            # token budget.
            if len(batch) == 0:
                return False
            if num_tokens > max_tokens:
                return True
            return False

        for idx in indices:
            idx_max_token = self.num_tokens(idx)
            tokens = max(tokens, idx_max_token)
            if is_batch_full(tokens * (len(batch) + 1)):
                yield batch
                batch = []
                # NOTE(review): tokens resets although idx is carried into
                # the new batch — confirm intended (same as sibling classes).
                tokens = 0
            batch.append(idx)
        if len(batch) >= 1:
            yield batch

    def __getitem__(self, idx):
        """Mask pair *idx* deterministically and return it as a sample dict."""
        rng = random.Random(int(idx))
        tokens_a = self.data[0][idx].tolist()
        tokens_b = self.data[1][idx].tolist()
        pair_type = tokens_a[0]  # first token of A encodes the pair type
        tokens_a = tokens_a[1:]
        tokens, token_type, mask, mask_labels = self.create_masked_lm_predictions(tokens_a, tokens_b, pair_type,
                                                                                  self.mask_lm_prob,
                                                                                  self.max_preds_per_seq,
                                                                                  self.vocab_words, rng)
        assert len(tokens) == len(token_type)
        return {"id": int(idx),
                "tokens": tokens,
                "tokens_type": token_type,
                "lm_labels": mask_labels,
                "ntokens": sum(mask),
                "data-type": pair_type,
                "model-type": "MLM"
                }

    def create_masked_lm_predictions(self, a, b, type, mask_lm_prob, max_preds_per_seq, vocab_words, rng):
        """Join *a* and *b* with [CLS]/[SEP], build segment ids, mask a subset.

        ``type`` shadows the builtin but is kept to preserve the call
        interface.  Returns (tokens, token_type, mask, mask_labels).
        """
        token_sep = [self.tokenizer.sep()]
        token_cls = [self.tokenizer.cls()]
        tokens = token_cls + a + token_sep + b + token_sep
        type_a = [0] * len(a)
        # Pair types 0/1 use distinct segment ids; other types put both
        # segments in segment 0.
        if type == 0 or type == 1:
            type_b = [1] * len(b)
        else:
            type_b = [0] * len(b)
        token_type = [type_a[0]] + type_a + [type_a[0]] + type_b + [type_b[0]]
        assert len(token_type) == len(tokens)
        # Candidates exclude [CLS], both [SEP]s stay maskable-adjacent:
        # positions 1..len(a) cover a; len(a)+2.. cover b.
        cand_indices = [idx + 1 for idx in range(len(a))] + [idx + 2 + len(a) for idx in range(len(b))]
        rng.shuffle(cand_indices)
        num_to_predict = min(max_preds_per_seq, max(1, int(round(len(tokens) * mask_lm_prob))))
        mask = [0] * len(tokens)
        mask_labels = [-1] * len(tokens)
        for idx in cand_indices[:num_to_predict]:
            mask[idx] = 1
            label = self.mask_token(idx, tokens, vocab_words, rng)
            mask_labels[idx] = label
        return tokens, token_type, mask, mask_labels

    def mask_token(self, idx, tokens, vocab_words, rng):
        """Replace tokens[idx] in place (80% [MASK] / 10% keep / 10% random).

        Returns the original token so it can be used as the label.
        """
        label = tokens[idx]
        if rng.random() < 0.8:
            new_label = self.tokenizer.mask()
        else:
            if rng.random() < 0.5:
                new_label = label
            else:
                new_label = rng.choice(vocab_words)
        tokens[idx] = new_label
        return label

    def collate(self, samples):
        """Pad a list of __getitem__ samples into one training batch dict."""
        padding = self.tokenizer.pad()
        tensor_size = max(len(candidate["tokens"]) for candidate in samples)
        batch = len(samples)
        input_ids = torch.LongTensor(batch, tensor_size).fill_(padding)
        token_type_ids = torch.LongTensor(batch, tensor_size).fill_(padding)
        attention_mask = torch.LongTensor(batch, tensor_size).fill_(0)
        lm_labels = torch.LongTensor(batch, tensor_size).fill_(-1)
        nsp_labels = torch.LongTensor(batch).fill_(-1)
        id_ = torch.LongTensor(batch).fill_(padding)
        ntokens = 0
        for i, candidates in enumerate(samples):
            assert candidates["model-type"].lower() == "mlm"
            tokens = candidates["tokens"]
            types = candidates["tokens_type"]
            lm_tokens = candidates["lm_labels"]
            lens = len(tokens)
            input_ids[i, :lens] = torch.Tensor(tokens)
            token_type_ids[i, :lens] = torch.Tensor(types)
            lm_labels[i, :lens] = torch.Tensor(lm_tokens)
            attention_mask[i, :lens] = 1
            nsp_labels[i] = candidates["data-type"]
            ntokens += candidates['ntokens']
            id_[i] = candidates["id"]
        batch = {
            'id': id_,
            'nsentences': batch,
            "ntokens": ntokens,
            'net_input': {
                'input_tokens': input_ids,
                'token_type_ids': token_type_ids,
                'attention_mask': attention_mask,
                'clm': False
            },
            'target': lm_labels,
            'nsp_labels': nsp_labels
        }
        return batch

    def get_dummy_batch(self):
        # NOTE(review): these sample dicts use keys ('Q', 'A', 'style') that
        # collate() does not read — collate expects 'tokens', 'tokens_type',
        # 'lm_labels', 'ntokens', 'data-type', 'model-type' — so this call
        # would raise KeyError as written.  Kept as-is pending the intended
        # schema; confirm against callers.
        bsz = 2
        src_len = 20
        return self.collate([
            {
                'id': i,
                'Q': torch.Tensor(src_len).uniform_(2, 20).long().tolist(),
                'A': torch.Tensor(src_len).uniform_(2, 20).long().tolist(),
                'style': 0
            }
            for i in range(bsz)
        ])
class GPTDataset(data.Dataset):
def __init__(self, path, tokenizer, batch_size, max_tokens, world_size=1, max_lens=510, seed=512, no_cache=False):
self.a_size = None
self.b_size = None
self.tokenizer = tokenizer
self.batch_size = batch_size
self.max_tokens = max_tokens
self.world_size = world_size
self.max_lens = max_lens
self.seed = seed
self.no_cache = no_cache
self.data = self.read_data(path)
self.padding = tokenizer.pad()
self.frozen_indices = self.init_data_indices()
self.frozen_batch = self.fill_batch(list(self.batch_by_size()))
@property
def supports_prefetch(self):
if self.data[0].supports_prefetch and self.data[1].supports_prefetch:
return True
else:
return False
def prefetch(self, indices):
if self.supports_prefetch:
self.data[0].prefetch(indices)
self.data[1].prefetch(indices)
print("| Fetch all data into memory")
def fill_batch(self, batches):
b_size = len(batches)
rng = random.Random(self.seed)
rng.shuffle(batches)
fill_num = self.world_size - b_size % self.world_size
if fill_num == self.world_size:
return batches
batches.extend(batches[-fill_num:])
assert len(batches) % self.world_size == 0
return batches
def index_len(self):
return len(self.a_size)
def read_data(self, path):
A_path = path + "-A"
B_path = path + "-B"
if self.no_cache:
A_data = indexed_dataset.IndexedDataset(A_path, fix_lua_indexing=True)
B_data = indexed_dataset.IndexedDataset(B_path, fix_lua_indexing=True)
else:
A_data = indexed_dataset.IndexedCachedDataset(A_path, fix_lua_indexing=True)
B_data = indexed_dataset.IndexedCachedDataset(B_path, fix_lua_indexing=True)
self.a_size = A_data.sizes
self.b_size = B_data.sizes
return (A_data, B_data)
def init_data_indices(self):
return self.ordered_indices()
def filter_by_size(self, indices):
ignore = []
for ind in indices:
size = self.num_tokens(ind)
if size > self.max_lens:
ignore.append(ind)
else:
yield ind
if len(ignore) > 0:
print(
"WARNING: {} samples have invalid sizes and will be skipped, max-qa-len={}, first few sample ids={}".format(
len(ignore), self.max_lens, ignore[:10]))
def __len__(self):
return len(self.frozen_indices)
def ordered_indices(self):
    """Return an ordered list of indices. Batches will be constructed based
    on this order.

    Indices are first randomly permuted under a fixed seed, then stably
    sorted by total token count, then filtered against max_lens. The
    stable sort means the seeded permutation decides ties between
    equal-length samples, so the result is deterministic per seed.
    """
    with numpy_seed(self.seed):
        indices = np.random.permutation(len(self.data[0]))
    lens = []
    for inx_a in indices:
        lens.append(self.num_tokens(inx_a))
    # mergesort is stable — keeps the seeded permutation order on ties.
    indices = indices[np.argsort(np.array(lens), kind='mergesort')]
    indices = list(self.filter_by_size(indices))
    return indices
def num_tokens(self, idx):
    """Combined token length of sample ``idx`` across the A and B halves."""
    a_len = self.a_size[idx]
    b_len = self.b_size[idx]
    return a_len + b_len
def batch_by_size(self):
    """Greedily group frozen_indices into batches under the max_tokens budget.

    Cost model: a batch costs (longest sample seen) * (batch size), i.e.
    the area of the padded tensor. Yields lists of sample indices; a batch
    is emitted just before the sample that would overflow the budget.
    """
    max_tokens = self.max_tokens
    indices = self.frozen_indices
    batch = []
    tokens = 0

    def is_batch_full(num_tokens):
        # Never report an empty batch as full, so every sample is placed.
        if len(batch) == 0:
            return False
        if num_tokens > max_tokens:
            return True
        # if len(batch) == self.batch_size:
        #     return True
        return False

    for idx in indices:
        idx_max_token = self.num_tokens(idx)
        # Running max = padding width of the current batch.
        tokens = max(tokens, idx_max_token)
        if is_batch_full(tokens * (len(batch) + 1)):
            yield batch
            batch = []
            # NOTE(review): `tokens` is reset to 0 before the overflowing
            # sample is appended below, so its length is not counted toward
            # the new batch's width until the next iteration — confirm this
            # under-counting is intended.
            tokens = 0
        batch.append(idx)
    # Flush the final partial batch.
    if len(batch) >= 1:
        yield batch
def __getitem__(self, idx):
    """Assemble one CLM sample from the idx-th A/B pair.

    tokens:    [CLS] + A + [SEP] + B
    lm_labels: A + [SEP] + B + [SEP]   (inputs shifted left by one, so each
               position predicts the next token; lengths match by construction)
    """
    tokens_a = self.data[0][idx].tolist()
    tokens_b = self.data[1][idx].tolist()
    token_sep = [self.tokenizer.sep()]
    token_cls = [self.tokenizer.cls()]
    tokens = token_cls + tokens_a + token_sep + tokens_b
    lm_labels = tokens_a + token_sep + tokens_b + token_sep
    assert len(tokens) == len(lm_labels)
    return {"id": int(idx),
            "tokens": tokens,
            "lm_labels": lm_labels,
            "model-type": "CLM"
            }
def collate(self, samples):
    """Pad a list of CLM samples into batched LongTensors.

    Returns a dict with 'id', 'nsentences', 'ntokens', 'net_input'
    (input_tokens, attention_mask, clm flag), 'target' (lm_labels padded
    with -1, the ignore index) and 'nsp_labels' (all -1; unused for CLM
    but kept for interface parity with the MLM collater).

    Fixes vs. the original: the duplicated `id_[i] = candidates["id"]`
    assignment is removed, and rows are built with
    torch.tensor(..., dtype=torch.long) instead of torch.Tensor(...),
    which round-tripped token ids through float32 and could corrupt ids
    above 2**24.
    """
    padding = self.tokenizer.pad()
    # Width of the padded batch = longest sample.
    tensor_size = max(len(candidate["tokens"]) for candidate in samples)
    batch = len(samples)
    input_ids = torch.LongTensor(batch, tensor_size).fill_(padding)
    attention_mask = torch.LongTensor(batch, tensor_size).fill_(0)
    lm_labels = torch.LongTensor(batch, tensor_size).fill_(-1)
    nsp_labels = torch.LongTensor(batch).fill_(-1)
    id_ = torch.LongTensor(batch).fill_(padding)
    ntokens = 0
    for i, candidates in enumerate(samples):
        assert candidates["model-type"].lower() == "clm"
        tokens = candidates["tokens"]
        lm_tokens = candidates["lm_labels"]
        lens = len(tokens)
        # Exact integer copy — no float32 round trip.
        input_ids[i, :lens] = torch.tensor(tokens, dtype=torch.long)
        lm_labels[i, :lens] = torch.tensor(lm_tokens, dtype=torch.long)
        attention_mask[i, :lens] = 1
        ntokens += lens
        id_[i] = candidates["id"]
    batch = {
        'id': id_,
        'nsentences': batch,
        "ntokens": ntokens,
        'net_input': {
            'input_tokens': input_ids,
            'attention_mask': attention_mask,
            'clm': True
        },
        'target': lm_labels,
        'nsp_labels': nsp_labels
    }
    return batch
class FuseDataset(data.Dataset):
    """Fuses an MLM dataset (a) and a CLM dataset (b) into one dataset.

    Fused sample indices below ``lens_a`` address the MLM half; larger
    indices are offset into the CLM half. Pre-built batches from both
    halves are interleaved in world_size-sized groups so that, per step,
    every distributed rank receives a batch of the same task type.
    """

    def __init__(self, dataload_a, dataload_b, world_size=1, seed=1):
        self.dataload_a = dataload_a
        self.dataload_b = dataload_b
        self.world_size = world_size
        # Offset separating a-indices from b-indices in the fused index space
        # (raw sample count of the A half, pre-filtering).
        self.lens_a = dataload_a.index_len()
        self.init_seed = seed + 256
        self.fuse_lens = len(dataload_a) + len(dataload_b)
        self.frozen_batch = self.init_batch()

    @property
    def supports_prefetch(self):
        # True only when both halves can prefetch.
        if self.dataload_a.supports_prefetch and self.dataload_b.supports_prefetch:
            return True
        else:
            return False

    def prefetch(self, indices):
        """Split fused indices back into per-half indices and prefetch each half."""
        if not self.dataload_a.supports_prefetch or not self.dataload_b.supports_prefetch:
            return
        a_indices = []
        b_indices = []
        for idx in indices:
            if idx >= self.lens_a:
                # Undo the fused-index offset for the B half.
                idx -= self.lens_a
                b_indices.append(idx)
            else:
                a_indices.append(idx)
        self.dataload_a.prefetch(a_indices)
        self.dataload_b.prefetch(b_indices)

    def init_batch(self):
        """Merge both halves' frozen batches (B indices offset by lens_a)
        and interleave them by world_size groups.

        NOTE(review): shuffles the child datasets' ``frozen_batch`` lists
        in place and extends batch_a, mutating dataload_a.frozen_batch —
        confirm the children's batch lists are not reused elsewhere.
        """
        rng = random.Random(self.init_seed)
        batch_a = self.dataload_a.frozen_batch
        batch_b = self.dataload_b.frozen_batch
        rng.shuffle(batch_a)
        rng.shuffle(batch_b)
        lens_a = self.lens_a
        fake_b = []
        for b in batch_b:
            # Shift B-half sample ids into the fused index space.
            fake_b.append([i + lens_a for i in b])
        batch_a.extend(fake_b)
        return self.shuffle_by_size(batch_a, rng)

    def shuffle_by_size(self, batches, rng):
        """Shuffle batches in world_size-sized groups, keeping each group
        contiguous so all ranks draw the same task type per step."""
        assert len(batches) % self.world_size == 0
        rand_size = len(batches) // self.world_size
        indices = list(range(rand_size))
        rng.shuffle(indices)
        new_batches = []
        for idx in indices:
            new_batches.extend(batches[idx * self.world_size:(idx + 1) * self.world_size])
        assert len(new_batches) == len(batches)
        return new_batches

    @classmethod
    def load_dataset(self, tokenizer, args):
        """Build the fused MLM+CLM training loader and per-file validation sets.

        NOTE(review): the classmethod's first parameter is named ``self``
        but receives the class (conventionally ``cls``).
        """
        train_path = args.data
        train_prefix = args.train_prefix
        valid_prefix = args.valid_prefix
        max_lens = args.max_lens
        valid_data = []
        for valid_file in valid_prefix.split(","):
            path = os.path.join(train_path, valid_file)
            # File naming convention decides the task type.
            if "MLM" in valid_file:
                valid_data.append(
                    BertPairDataset(path, tokenizer, args.valid_batch, args.max_tokens, world_size=args.world_size,
                                    max_lens=max_lens, mask_lm_prob=args.mask_lm_prob,
                                    max_preds_per_seq=args.max_preds_per_seq, no_cache=args.no_cache))
            else:
                valid_data.append(
                    GPTDataset(path, tokenizer, args.valid_batch, args.max_tokens, world_size=args.world_size,
                               max_lens=max_lens, no_cache=args.no_cache))
        train_mlm = os.path.join(train_path, train_prefix + "MLM")
        train_clm = os.path.join(train_path, train_prefix + "CLM")
        mlm_data = BertPairDataset(train_mlm, tokenizer, args.train_batch, args.max_tokens, world_size=args.world_size,
                                   max_lens=max_lens, mask_lm_prob=args.mask_lm_prob,
                                   max_preds_per_seq=args.max_preds_per_seq, no_cache=args.no_cache)
        clm_data = GPTDataset(train_clm, tokenizer, args.train_batch, args.max_tokens, world_size=args.world_size,
                              max_lens=max_lens, no_cache=args.no_cache)
        # NOTE(review): args.world_size is not forwarded here, so the fused
        # dataset uses world_size=1 — confirm against distributed usage.
        train_loader = FuseDataset(mlm_data, clm_data)
        return train_loader, valid_data

    @classmethod
    def load_dataset_no_nsp(self, tokenizer, args):
        """Variant of load_dataset() without the next-sentence-prediction task.

        NOTE(review): first parameter named ``self`` instead of ``cls``.
        """
        print("| Load dataset with no nsp loss")
        use_task_embed = args.use_task_embed
        if use_task_embed:
            print("| Training with task embedding")
        else:
            print("| Training with no task embedding")
        train_path = args.data
        train_prefix = args.train_prefix
        valid_prefix = args.valid_prefix
        max_lens = args.max_lens
        valid_data = []
        for valid_file in valid_prefix.split(","):
            path = os.path.join(train_path, valid_file)
            if "MLM" in valid_file:
                valid_data.append(
                    MLMDataset(path, tokenizer, args.valid_batch, args.max_tokens, world_size=args.world_size,
                               max_lens=max_lens, mask_lm_prob=args.mask_lm_prob,
                               max_preds_per_seq=args.max_preds_per_seq, no_cache=args.no_cache,
                               drop_first_token=args.drop_first_token, use_task_embed=args.use_task_embed))
            else:
                if use_task_embed:
                    # NOTE(review): this *validation* set uses args.train_batch
                    # (the sibling branch uses args.valid_batch) — suspected
                    # copy-paste swap, confirm.
                    valid_data.append(
                        CLMTaskDataset(path, tokenizer, args.train_batch, args.max_tokens, world_size=args.world_size,
                                       max_lens=args.max_lens, no_cache=args.no_cache))
                else:
                    valid_data.append(
                        CLMDataset(path, tokenizer, args.valid_batch, args.max_tokens, world_size=args.world_size,
                                   max_lens=max_lens, no_cache=args.no_cache, drop_first_token=args.drop_first_token))
        train_mlm = os.path.join(train_path, train_prefix + "MLM")
        train_clm = os.path.join(train_path, train_prefix + "CLM")
        mlm_data = MLMDataset(train_mlm, tokenizer, args.train_batch, args.max_tokens, world_size=args.world_size,
                              max_lens=max_lens, mask_lm_prob=args.mask_lm_prob,
                              max_preds_per_seq=args.max_preds_per_seq, no_cache=args.no_cache,
                              drop_first_token=args.drop_first_token, use_task_embed=args.use_task_embed)
        if use_task_embed:
            clm_data = CLMTaskDataset(train_clm, tokenizer, args.train_batch, args.max_tokens,
                                      world_size=args.world_size,
                                      max_lens=args.max_lens, no_cache=args.no_cache)
        else:
            # NOTE(review): this *training* set uses args.valid_batch —
            # suspected copy-paste swap with the validation branch, confirm.
            clm_data = CLMDataset(train_clm, tokenizer, args.valid_batch, args.max_tokens, world_size=args.world_size,
                                  max_lens=max_lens, no_cache=args.no_cache, drop_first_token=args.drop_first_token)
        # NOTE(review): world_size not forwarded (defaults to 1) — confirm.
        train_loader = FuseDataset(mlm_data, clm_data)
        return train_loader, valid_data

    def __len__(self):
        # NOTE(review): length is the number of *batches*, while
        # __getitem__ addresses individual fused *samples* — confirm
        # callers expect this asymmetry.
        return len(self.frozen_batch)

    def __getitem__(self, idx):
        """Route a fused index to the appropriate half."""
        if idx >= self.lens_a:
            idx -= self.lens_a
            return self.dataload_b[idx]
        else:
            return self.dataload_a[idx]

    def collate(self, samples):
        """Dispatch collation to the half that produced these samples.

        Batches are homogeneous by construction, so the first sample's
        model-type decides the collater.
        """
        data_type = samples[0]["model-type"]
        if data_type.lower() == "mlm":
            return self.dataload_a.collate(samples)
        else:
            return self.dataload_b.collate(samples)
class FuseSampler(object):
    """Shards a FuseDataset's frozen batches across distributed workers.

    Worker ``shard_id`` takes every num_shards-th batch (strided split) and
    prefetches all sample indices it will touch.
    """

    def __init__(self, dataset, num_shards, shard_id):
        # NOTE(review): operator precedence makes this
        # (num_shards > 1 and shard_id < 0) or shard_id >= num_shards,
        # so shard_id >= num_shards raises even when num_shards == 1,
        # before the reset below can apply — confirm intended.
        if num_shards > 1 and shard_id < 0 or shard_id >= num_shards:
            raise ValueError('shard_id must be between 0 and num_shards')
        if num_shards == 1:
            shard_id = 0
        batches = dataset.frozen_batch
        self._sharded_len = len(batches) // num_shards
        # iterable = batches[:self._sharded_len*num_shards]
        iterable = batches[shard_id::num_shards]
        indices = []
        for batch in iterable:
            indices.extend(batch)
        # Warm the cache for every sample this shard will read.
        dataset.prefetch(indices)
        # NOTE(review): zip_longest pads the shorter sequence, so when
        # len(iterable) > _sharded_len (any non-dividing shard) the range()
        # does NOT truncate and self.itr can be longer than _sharded_len,
        # making len(self) disagree with iteration — confirm; plain zip()
        # may have been intended.
        self.itr = [item[1] for item in list(itertools.zip_longest(
            range(self._sharded_len),
            iterable,
        ))]

    def __len__(self):
        # Number of batches this shard is nominally responsible for.
        return self._sharded_len

    def __iter__(self):
        return iter(self.itr)

    def __next__(self):
        # NOTE(review): self.itr is a list; next() on a list raises
        # TypeError, and the [1] index would take element 1 of a batch —
        # this looks like dead/broken code, with iteration expected to go
        # through __iter__ — confirm.
        return next(self.itr)[1]
| 36.487542
| 205
| 0.572648
| 5,376
| 43,931
| 4.432292
| 0.053013
| 0.023418
| 0.016367
| 0.011751
| 0.859745
| 0.846945
| 0.838677
| 0.82936
| 0.822646
| 0.807831
| 0
| 0.007755
| 0.327787
| 43,931
| 1,203
| 206
| 36.517872
| 0.799126
| 0.034964
| 0
| 0.812745
| 0
| 0.006863
| 0.048685
| 0
| 0
| 0
| 0
| 0
| 0.018627
| 1
| 0.092157
| false
| 0
| 0.007843
| 0.023529
| 0.20098
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5dbe91bfef0069ef3d979640cca95d3af61c04cb
| 17,429
|
py
|
Python
|
boa3_test/tests/examples_tests/test_NEP17.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/tests/examples_tests/test_NEP17.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/tests/examples_tests/test_NEP17.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from boa3.boa3 import Boa3
from boa3.constants import GAS_SCRIPT, NEO_SCRIPT
from boa3.neo import to_script_hash
from boa3.neo.cryptography import hash160
from boa3.neo.vm.type.String import String
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.TestExecutionException import TestExecutionException
from boa3_test.tests.test_classes.testengine import TestEngine
class TestNEP17Template(BoaTest):
    """Behavioral tests for the example NEP-17 token contract (NEP17.py).

    Covers compilation, one-shot owner-gated deploy, the read-only token
    metadata methods, transfer semantics (including Transfer event
    payloads), the onNEP17Payment mint path for NEO/GAS deposits, and the
    owner-only verify() method.
    """

    default_folder: str = 'examples'

    # The example contract's owner is the zero script hash; the other
    # accounts are arbitrary non-owner 20-byte hashes.
    OWNER_SCRIPT_HASH = bytes(20)
    OTHER_ACCOUNT_1 = to_script_hash(b'NiNmXL8FjEUEs1nfX9uHFBNaenxDHJtmuB')
    OTHER_ACCOUNT_2 = bytes(range(20))

    def test_nep17_compile(self):
        """The example must compile without raising."""
        path = self.get_contract_path('NEP17.py')
        Boa3.compile(path)

    def test_nep17_deploy(self):
        """deploy() succeeds exactly once, and only with the owner's signature."""
        path = self.get_contract_path('NEP17.py')
        engine = TestEngine()
        # needs the owner signature
        result = self.run_smart_contract(engine, path, method='deploy',
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        # should return false if the signature isn't from the owner
        result = self.run_smart_contract(engine, path, 'deploy',
                                         signer_accounts=[self.OTHER_ACCOUNT_1],
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        result = self.run_smart_contract(engine, path, 'deploy',
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        # must always return false after first execution
        result = self.run_smart_contract(engine, path, 'deploy',
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(False, result)

    def test_nep17_symbol(self):
        path = self.get_contract_path('NEP17.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'symbol')
        self.assertEqual('NEP17', result)

    def test_nep17_decimals(self):
        path = self.get_contract_path('NEP17.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'decimals')
        self.assertEqual(8, result)

    def test_nep17_total_supply(self):
        """totalSupply is 0 before deploy and the full supply afterwards."""
        total_supply = 10_000_000 * 10 ** 8
        path = self.get_contract_path('NEP17.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'totalSupply')
        self.assertEqual(0, result)
        result = self.run_smart_contract(engine, path, 'deploy',
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        result = self.run_smart_contract(engine, path, 'totalSupply')
        self.assertEqual(total_supply, result)

    def test_nep17_total_balance_of(self):
        """The owner holds the full supply after deploy; bad hashes are rejected."""
        total_supply = 10_000_000 * 10 ** 8
        path = self.get_contract_path('NEP17.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        self.assertEqual(0, result)
        result = self.run_smart_contract(engine, path, 'deploy',
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        result = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        self.assertEqual(total_supply, result)
        # should fail when the script length is not 20
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'balanceOf', bytes(10))
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'balanceOf', bytes(30))

    @unittest.skip('Examples need to be changed to test with the latest Neo version')
    def test_nep17_total_transfer(self):
        """transfer() validation, event payload, and balance bookkeeping."""
        transferred_amount = 10 * 10 ** 8  # 10 tokens
        path = self.get_contract_path('NEP17.py')
        engine = TestEngine()
        # should fail before running deploy
        result = self.run_smart_contract(engine, path, 'transfer',
                                         self.OWNER_SCRIPT_HASH, self.OTHER_ACCOUNT_1, transferred_amount, None,
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        result = self.run_smart_contract(engine, path, 'deploy',
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        # should fail if the sender doesn't sign
        result = self.run_smart_contract(engine, path, 'transfer',
                                         self.OWNER_SCRIPT_HASH, self.OTHER_ACCOUNT_1, transferred_amount, None,
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        # should fail if the sender doesn't have enough balance
        result = self.run_smart_contract(engine, path, 'transfer',
                                         self.OTHER_ACCOUNT_1, self.OWNER_SCRIPT_HASH, transferred_amount, None,
                                         signer_accounts=[self.OTHER_ACCOUNT_1],
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        # should fail when any of the scripts' length is not 20
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'transfer',
                                    self.OWNER_SCRIPT_HASH, bytes(10), transferred_amount, "")
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'transfer',
                                    bytes(10), self.OTHER_ACCOUNT_1, transferred_amount, "")
        # should fail when the amount is less than 0
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'transfer',
                                    self.OTHER_ACCOUNT_1, self.OWNER_SCRIPT_HASH, -10, None)
        # fire the transfer event when transferring to yourself
        balance_before = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        result = self.run_smart_contract(engine, path, 'transfer',
                                         self.OWNER_SCRIPT_HASH, self.OWNER_SCRIPT_HASH, transferred_amount, None,
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        transfer_events = engine.get_events('Transfer')
        self.assertEqual(1, len(transfer_events))
        self.assertEqual(3, len(transfer_events[0].arguments))
        sender, receiver, amount = transfer_events[0].arguments
        # event arguments may come back as str depending on the engine
        if isinstance(sender, str):
            sender = String(sender).to_bytes()
        if isinstance(receiver, str):
            receiver = String(receiver).to_bytes()
        self.assertEqual(self.OWNER_SCRIPT_HASH, sender)
        self.assertEqual(self.OWNER_SCRIPT_HASH, receiver)
        self.assertEqual(transferred_amount, amount)
        # transferring to yourself doesn't change the balance
        balance_after = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        self.assertEqual(balance_before, balance_after)
        # does fire the transfer event
        balance_sender_before = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        balance_receiver_before = self.run_smart_contract(engine, path, 'balanceOf', self.OTHER_ACCOUNT_1)
        result = self.run_smart_contract(engine, path, 'transfer',
                                         self.OWNER_SCRIPT_HASH, self.OTHER_ACCOUNT_1, transferred_amount, None,
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        transfer_events = engine.get_events('Transfer')
        self.assertEqual(1, len(transfer_events))
        self.assertEqual(3, len(transfer_events[0].arguments))
        sender, receiver, amount = transfer_events[0].arguments
        if isinstance(sender, str):
            sender = String(sender).to_bytes()
        if isinstance(receiver, str):
            receiver = String(receiver).to_bytes()
        self.assertEqual(self.OWNER_SCRIPT_HASH, sender)
        self.assertEqual(self.OTHER_ACCOUNT_1, receiver)
        self.assertEqual(transferred_amount, amount)
        # transferring to someone other than yourself does change the balance
        balance_sender_after = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        balance_receiver_after = self.run_smart_contract(engine, path, 'balanceOf', self.OTHER_ACCOUNT_1)
        self.assertEqual(balance_sender_before - transferred_amount, balance_sender_after)
        self.assertEqual(balance_receiver_before + transferred_amount, balance_receiver_after)

    @unittest.skip('Examples need to be changed to test with the latest Neo version')
    def test_nep17_onPayment(self):
        """Depositing NEO/GAS into the contract mints NEP-17 tokens (10x / 2x)."""
        transferred_amount = 10 * 10 ** 8  # 10 tokens
        path = self.get_contract_path('NEP17.py')
        engine = TestEngine()
        engine.add_contract(path.replace('.py', '.nef'))
        output, manifest = self.compile_and_save(path)
        nep17_address = hash160(output)
        result = self.run_smart_contract(engine, path, 'deploy',
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        engine.add_neo(self.OWNER_SCRIPT_HASH, transferred_amount)
        engine.add_gas(self.OWNER_SCRIPT_HASH, transferred_amount)
        # transferring NEO to the smart contract
        # saving the balance before the transfer to be able to compare after it
        neo_balance_sender_before = self.run_smart_contract(engine, NEO_SCRIPT, 'balanceOf', self.OWNER_SCRIPT_HASH)
        neo_balance_nep17_before = self.run_smart_contract(engine, NEO_SCRIPT, 'balanceOf', nep17_address)
        nep17_balance_sender_before = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        result = self.run_smart_contract(engine, NEO_SCRIPT, 'transfer',
                                         self.OWNER_SCRIPT_HASH, nep17_address, transferred_amount, None,
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        transfer_events = engine.get_events('Transfer')
        self.assertEqual(2, len(transfer_events))
        self.assertEqual(3, len(transfer_events[0].arguments))
        self.assertEqual(3, len(transfer_events[1].arguments))
        # this is the event NEO emitted
        sender, receiver, amount = transfer_events[0].arguments
        if isinstance(sender, str):
            sender = String(sender).to_bytes()
        if isinstance(receiver, str):
            receiver = String(receiver).to_bytes()
        self.assertEqual(self.OWNER_SCRIPT_HASH, sender)
        self.assertEqual(nep17_address, receiver)
        self.assertEqual(transferred_amount, amount)
        # this is the event NEP17 emitted
        sender, receiver, amount = transfer_events[1].arguments
        if isinstance(sender, str):
            sender = String(sender).to_bytes()
        if isinstance(receiver, str):
            receiver = String(receiver).to_bytes()
        # mint events have a null sender
        self.assertEqual(None, sender)
        self.assertEqual(self.OWNER_SCRIPT_HASH, receiver)
        # transferred_amount is multiplied by 10, because this smart contract is minting the NEO received
        self.assertEqual(transferred_amount * 10, amount)
        # saving the balance after the transfer to compare it with the previous data
        neo_balance_sender_after = self.run_smart_contract(engine, NEO_SCRIPT, 'balanceOf', self.OWNER_SCRIPT_HASH)
        neo_balance_nep17_after = self.run_smart_contract(engine, NEO_SCRIPT, 'balanceOf', nep17_address)
        nep17_balance_sender_after = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        self.assertEqual(neo_balance_sender_before - transferred_amount, neo_balance_sender_after)
        self.assertEqual(neo_balance_nep17_before + transferred_amount, neo_balance_nep17_after)
        # transferred_amount is multiplied by 10, because this smart contract is minting the NEO received
        self.assertEqual(nep17_balance_sender_before + transferred_amount * 10, nep17_balance_sender_after)
        # transferring GAS to the smart contract
        # saving the balance before the transfer to be able to compare after it
        gas_balance_sender_before = self.run_smart_contract(engine, GAS_SCRIPT, 'balanceOf', self.OWNER_SCRIPT_HASH)
        gas_balance_nep17_before = self.run_smart_contract(engine, GAS_SCRIPT, 'balanceOf', nep17_address)
        nep17_balance_sender_before = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        result = self.run_smart_contract(engine, GAS_SCRIPT, 'transfer',
                                         self.OWNER_SCRIPT_HASH, nep17_address, transferred_amount, None,
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
        transfer_events = engine.get_events('Transfer')
        self.assertEqual(2, len(transfer_events))
        self.assertEqual(3, len(transfer_events[0].arguments))
        self.assertEqual(3, len(transfer_events[1].arguments))
        # this is the event GAS emitted
        sender, receiver, amount = transfer_events[0].arguments
        if isinstance(sender, str):
            sender = String(sender).to_bytes()
        if isinstance(receiver, str):
            receiver = String(receiver).to_bytes()
        self.assertEqual(self.OWNER_SCRIPT_HASH, sender)
        self.assertEqual(nep17_address, receiver)
        self.assertEqual(transferred_amount, amount)
        # this is the event NEP17 emitted
        sender, receiver, amount = transfer_events[1].arguments
        if isinstance(sender, str):
            sender = String(sender).to_bytes()
        if isinstance(receiver, str):
            receiver = String(receiver).to_bytes()
        self.assertEqual(None, sender)
        self.assertEqual(self.OWNER_SCRIPT_HASH, receiver)
        # transferred_amount is multiplied by 2, because this smart contract is minting the GAS received
        self.assertEqual(transferred_amount * 2, amount)
        # saving the balance after the transfer to compare it with the previous data
        gas_balance_sender_after = self.run_smart_contract(engine, GAS_SCRIPT, 'balanceOf', self.OWNER_SCRIPT_HASH)
        gas_balance_nep17_after = self.run_smart_contract(engine, GAS_SCRIPT, 'balanceOf', nep17_address)
        nep17_balance_sender_after = self.run_smart_contract(engine, path, 'balanceOf', self.OWNER_SCRIPT_HASH)
        self.assertEqual(gas_balance_sender_before - transferred_amount, gas_balance_sender_after)
        self.assertEqual(gas_balance_nep17_before + transferred_amount, gas_balance_nep17_after)
        # transferred_amount is multiplied by 2, because this smart contract is minting the GAS received
        self.assertEqual(nep17_balance_sender_before + transferred_amount * 2, nep17_balance_sender_after)
        # trying to call onNEP17Transfer() will result in an abort if the one calling it is not NEO or GAS contracts
        with self.assertRaises(TestExecutionException, msg=self.ABORTED_CONTRACT_MSG):
            self.run_smart_contract(engine, path, 'onNEP17Transfer',
                                    self.OWNER_SCRIPT_HASH, transferred_amount, None)

    def test_nep17_verify(self):
        """verify() passes only when the transaction is signed by the owner."""
        path = self.get_contract_path('NEP17.py')
        engine = TestEngine()
        # should fail without signature
        result = self.run_smart_contract(engine, path, 'verify',
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        # should fail if not signed by the owner
        result = self.run_smart_contract(engine, path, 'verify',
                                         signer_accounts=[self.OTHER_ACCOUNT_1],
                                         expected_result_type=bool)
        self.assertEqual(False, result)
        result = self.run_smart_contract(engine, path, 'verify',
                                         signer_accounts=[self.OWNER_SCRIPT_HASH],
                                         expected_result_type=bool)
        self.assertEqual(True, result)
| 52.496988
| 116
| 0.652189
| 1,978
| 17,429
| 5.479778
| 0.084429
| 0.084417
| 0.053141
| 0.088569
| 0.859028
| 0.821478
| 0.802011
| 0.794723
| 0.75745
| 0.737799
| 0
| 0.017574
| 0.26869
| 17,429
| 331
| 117
| 52.655589
| 0.83281
| 0.095932
| 0
| 0.704918
| 0
| 0
| 0.043182
| 0.002162
| 0
| 0
| 0
| 0
| 0.27459
| 1
| 0.036885
| false
| 0
| 0.036885
| 0
| 0.094262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b91001e332aa64ac253ac48782d8533dd11227e4
| 3,356
|
py
|
Python
|
tests/test_updating_todos_status.py
|
dimasaryo/todomvc-test
|
3e9d793a8c40c6934a7fae6b8ef7752ef09b0715
|
[
"MIT"
] | 4
|
2015-12-15T23:25:39.000Z
|
2020-10-18T09:50:31.000Z
|
tests/test_updating_todos_status.py
|
dimasaryo/todomvc-test
|
3e9d793a8c40c6934a7fae6b8ef7752ef09b0715
|
[
"MIT"
] | null | null | null |
tests/test_updating_todos_status.py
|
dimasaryo/todomvc-test
|
3e9d793a8c40c6934a7fae6b8ef7752ef09b0715
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.mark.usefixtures('generate_test_data')
def test_mark_active_todo_as_complete(todoApp):
    """
    Test on marking an active todo as complete
    Pre-condition:
        1. The todos application consists of 3 active todos and 3 completed todos
    Steps:
        1. Open browser and go to DESTINATION_URL.
        2. Click on the Checkbox before the active todo's label.
    Expected result:
        1. The todo is not active
        2. The active todo count is only 2 items left
    """
    # Get the active todos
    active_todos = todoApp.find_active_todos()
    # Toggle the first active todo from the list
    text = active_todos[0].text
    todoApp.toggle_todo(text)
    # ASSERTION
    todo = todoApp.find_todo(text)
    # Check the new todo status, it should be completed.
    # Fixed: `== False` comparison replaced with the idiomatic `not`.
    assert not todoApp.is_active_todo(todo)
    # Check the active todo count
    assert todoApp.count_active_todos() == '2 items left'
@pytest.mark.usefixtures('generate_test_data')
def test_mark_completed_todo_as_active(todoApp):
    """
    Test on marking a completed todo as active
    Pre-condition:
        1. The todos application consists of 3 active todos and 3 completed todos
    Steps:
        1. Open browser and go to DESTINATION_URL.
        2. Click on the Checkbox before the completed todo's label.
    Expected result:
        1. The todo is now active
        2. The active todo count is increased to 4 items left
    """
    # Get the completed todos
    completed_todos = todoApp.find_completed_todos()
    # Toggle the first completed todo from the list
    text = completed_todos[0].text
    todoApp.toggle_todo(text)
    # ASSERTION
    todo = todoApp.find_todo(text)
    # Check the new todo status, it should be active.
    assert todoApp.is_active_todo(todo)
    # Check the active todo count
    assert todoApp.count_active_todos() == '4 items left'
@pytest.mark.usefixtures('generate_test_data')
def test_toggle_all_todos_one_time(todoApp):
    """
    Test on marking all todos as completed
    Pre-condition:
        1. The todos application consists of 3 active todos and 3 completed todos
    Steps:
        1. Open browser and go to DESTINATION_URL.
        2. Click on the arrow down button before the new todo field.
    Expected result:
        1. All todos become completed.
        2. No active todos left
    """
    # Toggle all todos to completed
    todoApp.click_toggle_all()
    # ASSERTION
    # Check the active todo count
    assert todoApp.count_active_todos() == '0 items left'
    # Get all completed todos
    completed_todos = todoApp.find_completed_todos()
    assert len(completed_todos) == 6
@pytest.mark.usefixtures('generate_test_data')
def test_toggle_all_todos_twice(todoApp):
    """
    Test on marking all todos as active
    Pre-condition:
        1. The todos application consists of 3 active todos and 3 completed todos
    Steps:
        1. Open browser and go to DESTINATION_URL.
        2. Click the arrow down button before the new todo field twice.
    Expected result:
        1. All todos become active.
        2. No completed todos left
    """
    # Click toggle all button twice: first click completes everything,
    # second click re-activates everything.
    todoApp.click_toggle_all()
    todoApp.click_toggle_all()
    # ASSERTION
    # Check the active todo count
    assert todoApp.count_active_todos() == '6 items left'
    # Get all completed todos
    completed_todos = todoApp.find_completed_todos()
    assert len(completed_todos) == 0
    # Get all active todos
    active_todos = todoApp.find_active_todos()
    assert len(active_todos) == 6
| 26.634921
| 73
| 0.728546
| 497
| 3,356
| 4.772636
| 0.158954
| 0.078836
| 0.038364
| 0.045531
| 0.832631
| 0.802277
| 0.777825
| 0.730607
| 0.671164
| 0.620573
| 0
| 0.014558
| 0.201728
| 3,356
| 125
| 74
| 26.848
| 0.870847
| 0.529201
| 0
| 0.5
| 1
| 0
| 0.083975
| 0
| 0
| 0
| 0
| 0.192
| 0.28125
| 1
| 0.125
| false
| 0
| 0.03125
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f8f22f52d9f397e11d2bf8043c6eeb4c4fbdebb6
| 35,026
|
py
|
Python
|
src/models/gpy_zoo/gpy_zoo.py
|
jejjohnson/uncertain_gps
|
8f71a74dc38640dcf2113eb742229d991ead041d
|
[
"MIT"
] | 9
|
2020-02-23T16:23:58.000Z
|
2022-03-07T06:43:45.000Z
|
src/models/gpy_zoo/gpy_zoo.py
|
jejjohnson/uncertain_gps
|
8f71a74dc38640dcf2113eb742229d991ead041d
|
[
"MIT"
] | null | null | null |
src/models/gpy_zoo/gpy_zoo.py
|
jejjohnson/uncertain_gps
|
8f71a74dc38640dcf2113eb742229d991ead041d
|
[
"MIT"
] | 1
|
2022-02-25T04:37:18.000Z
|
2022-02-25T04:37:18.000Z
|
from typing import Optional, Union, Tuple
import GPy
import numpy as np
from scipy.cluster.vq import kmeans2
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.metrics import r2_score
from sklearn.utils.validation import check_X_y, check_array
class GPRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression algorithm. This algorithm implements the
GPR algorithm with considerations for uncertain inputs.
Parameters
----------
kernel : GPy.kern.Kern, default=None
The kernel function. Default kernel is the RBF kernel.
X_variance : float,np.ndarray, default=None
Option to do uncertain inputs.
max_iters : int, default=200
Maximum number of iterations to use for the optimizer.
optimizer : str, default='lbfgs'
The optimizer to use for the maximum log-likelihood optimization.
n_restarts : int, default=10
Number of random restarts for the optimizer. Good for avoiding
local minima
verbose : int, default=None
Option to display messages during optimization
normalize_y : bool, default=False
Option to normalize the outputs before the optimization. Good
for GP algorithms in general. Will do the reverse transformation
for predictions.
Attributes
----------
X_variance : np.ndarray, (features, features)
The error covariance matrix for the inputs.
gp_model : GPy.core
The trained GP model.
_y_train_mean : np.ndarray, (features)
The
_y_train_std : np.ndarray, (features)
Examples
--------
>> from src.gpy import GPRegression
>> from sklearn.datasets import make_friedman2
>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>> X_variance = 2.0
>> n_restarts = 0
>> verbose = None
>> gp_clf = GPRegression(
verbose=verbose,
n_restarts=n_restarts,
X_variance=X_variance,
)
>> gp_clf.fit(X, y);
>> ymean, ystd = gp_clf.predict(
Xtest,
return_std=True,
noiseless=False,
linearized=True
)
>> from src.gpy import GPRegression
>> from sklearn.datasets import make_friedman2
>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>> X_variance = np.ndarry([0.1, 0.1, 0.1])
>> n_restarts = 0
>> verbose = None
>> gp_clf = GPRegression(
verbose=verbose,
n_restarts=n_restarts,
X_variance=X_variance,
)
>> gp_clf.fit(X, y);
>> ymean, ystd = gp_clf.predict(
Xtest,
return_std=True,
noiseless=False,
linearized=True
)
"""
def __init__(
    self,
    kernel: Optional[GPy.kern.Kern] = None,
    X_variance: Optional[Union[float, np.ndarray]] = None,
    max_iters: int = 200,
    optimizer: str = "lbfgs",
    n_restarts: int = 10,
    verbose: Optional[int] = None,
    normalize_y: bool = False,
):
    """Store hyperparameters only; fitting happens in fit() (sklearn convention)."""
    self.kernel = kernel
    self.X_variance = X_variance
    self.max_iters = max_iters
    self.optimizer = optimizer
    self.n_restarts = n_restarts
    self.verbose = verbose
    self.normalize_y = normalize_y
def fit(self, X: np.ndarray, y: np.ndarray):
    """Fit the GP regression model.

    Parameters
    ----------
    X : np.ndarray, (samples, features)
        Input vectors for the training regime
    y : np.ndarray, (samples, targets)
        Labels for the training regime

    Returns
    -------
    self : returns an instance of self
    """
    # Check inputs
    X, y = check_X_y(
        X, y, multi_output=True, y_numeric=True, ensure_2d=True, dtype="numeric"
    )
    # Fixed: `== True` comparison replaced with idiomatic truthiness.
    if self.normalize_y:
        self._y_train_mean = np.mean(y, axis=0)
        self._y_train_std = np.std(y, axis=0)
        # remove mean and scale to unit variance
        y = (y - self._y_train_mean) / self._y_train_std
    # get shapes
    n_samples, d_dimensions = X.shape
    # check X_variance (normalizes float/vector inputs to a full matrix)
    self.X_variance = self._check_X_variance(self.X_variance, d_dimensions)
    # default Kernel Function
    if self.kernel is None:
        kernel = GPy.kern.RBF(input_dim=d_dimensions, ARD=False)
    else:
        kernel = self.kernel
    # Kernel matrix; noise_var=0.01 is only the starting value, the
    # likelihood variance is refined by the optimization below.
    gp_model = GPy.models.GPRegression(X, y, kernel, noise_var=0.01)
    # Optimization: random restarts help avoid local minima.
    if self.n_restarts >= 1:
        gp_model.optimize_restarts(
            num_restarts=self.n_restarts,
            robust=True,
            verbose=self.verbose,
            max_iters=self.max_iters,
        )
    else:
        gp_model.optimize(
            self.optimizer, messages=self.verbose, max_iters=self.max_iters
        )
    self.gp_model = gp_model
    return self
def _check_X_variance(
self, X_variance: Union[None, float, np.ndarray], n_dims: int
) -> Union[None, np.ndarray]:
"""Private method to check the X_variance parameter
Parameters
----------
X_variance : float, None, np.ndarray
The input for the uncertain inputs
Returns
-------
X_variance : np.ndarray, (n_features, n_features)
The final matrix for the uncertain inputs.
"""
if X_variance is None:
return X_variance
elif isinstance(X_variance, float):
return X_variance * np.ones(shape=(n_dims, n_dims))
elif isinstance(X_variance, np.ndarray):
if X_variance.shape == 1:
return X_variance * np.identity(n_dims)
elif X_variance.shape == n_dims:
return np.diag(X_variance)
elif X_variance.shape == (n_dims, n_dims):
return X_variance
else:
raise ValueError(
f"Shape of 'X_variance' ({X_variance.shape}) "
f"doesn't match X ({n_dims})"
)
else:
raise ValueError(f"Unrecognized type of X_variance.")
    def display_model(self):
        """Return the fitted GPy model object.

        GPy models pretty-print their parameter table via ``__str__``, so
        returning the model displays it cleanly in notebooks/REPLs.
        Requires ``fit`` to have been called (otherwise ``gp_model`` is
        unset and this raises ``AttributeError``).
        """
        return self.gp_model
def predict(
self, X, return_std=False, full_cov=False, noiseless=True, linearized=True
) -> Tuple[np.ndarray, np.ndarray]:
"""Predict using the GP Model. Returns the mean and standard deviation
(optional) or the full covariance matrix (optional). Also includes an
option to add the error correction term for the Linearized GP method.
Parameters
----------
X : np.ndarray, (n_samples, n_features)
Input vector to be predicted
return_std : bool, default=False
flag to return the standard deviation in the ouputs.
full_cov : bool, default=False
flag to return the full covariance for the outputs
noiseless : bool, default=True
flag to return the noise likelihood term with the
standard deviation
linearized : bool, default=True
flag to return the standard deviation with the
corrected input error.
Returns
-------
y_mean : np.ndarray, (n_samples, n_targets)
The mean predictions for the outputs
y_std : np.ndarray, (n_samples)
The standard deviations for the outputs.
* linearized with input errors if linearized=True
* has the likelihood noise if noiseless=False
y_cov : np.ndarray, (n_samples, n_samples)
The covariance matrix foer the outputs. Only returned if
full_cov = True
"""
X = check_array(X, ensure_2d=True, dtype="numeric")
if noiseless:
mean, var = self.gp_model.predict_noiseless(X)
else:
mean, var = self.gp_model.predict(X)
# undo normalization
if self.normalize_y == True:
mean = self._y_train_std * mean + self._y_train_mean
var = var * self._y_train_std ** 2
if return_std:
# we want the variance correction
if linearized == True and self.X_variance is not None:
# get the variance correction
var_add = self._variance_correction(X)
# get diagonal elements only
if full_cov == False:
var_add = np.diag(var_add)[:, None]
# add correction to original variance
var += var_add
# return mean and standard deviation
return mean, np.sqrt(var)
else:
return mean, np.sqrt(var)
else:
return mean
def _variance_correction(self, X: np.ndarray) -> np.ndarray:
"""Private method to calculate the corrective term for the
predictive variance.
Parameters
----------
X : np.ndarray, (n_samples, n_features)
Returns
-------
var_add : np.ndarray, (n_samples)
"""
# calculate the gradient
x_der, _ = self.gp_model.predictive_gradients(X)
# calculate correction
var_add = x_der[..., 0] @ self.X_variance @ x_der[..., 0].T
return var_add
class SparseGPRegressor(BaseEstimator, RegressorMixin):
    """Sparse Gaussian process regression with optional uncertain inputs.

    Parameters
    ----------
    kernel : GPy.kern.Kern, default=None
        The kernel function. Default kernel is the RBF kernel.
    n_inducing : int, default=10
        The number of inducing inputs to use for the regression model.
    inference : str, default='vfe'
        option to choose inference algorithm
        * vfe - approximates the posterior (default)
        * fitc - approximates the model
        * pep - hybrid of above approaches
    X_variance : float, np.ndarray, default=None
        Option to do uncertain inputs.
    max_iters : int, default=200
        Maximum number of iterations to use for the optimizer.
    optimizer : str, default='scg'
        The optimizer to use for the maximum log-likelihood optimization.
    n_restarts : int, default=10
        Number of random restarts for the optimizer. Good for avoiding
        local minima
    verbose : int, default=None
        Option to display messages during optimization
    normalize_y : bool, default=False
        Option to normalize the outputs before the optimization. Will do
        the reverse transformation for predictions.
    alpha : float, default=0.5
        Only used by the 'pep' inference: interpolation factor between the
        other two inference algorithms (alpha = 1 -> VFE, alpha = 0 -> FITC).

    Attributes
    ----------
    X_variance : np.ndarray, (features, features)
        The error covariance matrix for the inputs.
    gp_model : GPy.core
        The trained GP model.
    _y_train_mean : np.ndarray, (targets,)
        Per-target training mean (set only when normalize_y=True).
    _y_train_std : np.ndarray, (targets,)
        Per-target training std (set only when normalize_y=True).

    Examples
    --------
    >> from sklearn.datasets import make_friedman2
    >> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >> gp_clf = SparseGPRegressor(verbose=None, n_restarts=0, X_variance=2.0)
    >> gp_clf.fit(X, y)
    >> ymean, ystd = gp_clf.predict(Xtest, return_std=True, noiseless=False)
    """

    def __init__(
        self,
        kernel: Optional[GPy.kern.Kern] = None,
        inference: str = "vfe",
        X_variance: Optional[Union[float, np.ndarray]] = None,
        n_inducing: int = 10,
        max_iters: int = 200,
        optimizer: str = "scg",
        n_restarts: int = 10,
        verbose: Optional[int] = None,
        alpha: float = 0.5,
        normalize_y: bool = False,
    ):
        # sklearn convention: store arguments untouched; validate in fit()
        self.kernel = kernel
        self.n_inducing = n_inducing
        self.X_variance = X_variance
        self.inference = inference
        self.max_iters = max_iters
        self.optimizer = optimizer
        self.n_restarts = n_restarts
        self.verbose = verbose
        self.alpha = alpha
        self.normalize_y = normalize_y

    def fit(self, X: np.ndarray, y: np.ndarray):
        """Fit the Sparse GP regression model.

        Parameters
        ----------
        X : np.ndarray, (samples, features)
            Input vectors for the training regime
        y : np.ndarray, (samples, targets)
            Labels for the training regime

        Returns
        -------
        self : returns an instance of self
        """
        # Validate inputs (coerces to 2D numeric arrays)
        X, y = check_X_y(
            X, y, multi_output=True, y_numeric=True, ensure_2d=True, dtype="numeric"
        )
        # normalize outputs to zero mean / unit variance
        if self.normalize_y:
            self._y_train_mean = np.mean(y, axis=0)
            self._y_train_std = np.std(y, axis=0)
            y = (y - self._y_train_mean) / self._y_train_std
        n_samples, d_dimensions = X.shape
        # default Kernel: RBF with a shared lengthscale
        if self.kernel is None:
            self.kernel = GPy.kern.RBF(input_dim=d_dimensions, ARD=False)
        # expand X_variance into a (features, features) covariance matrix
        self.X_variance = self._check_X_variance(self.X_variance, d_dimensions)
        # inducing inputs initialized at k-means centroids
        z = kmeans2(X, self.n_inducing, minit="points")[0]
        gp_model = GPy.models.SparseGPRegression(X, y, kernel=self.kernel, Z=z)
        # choose the approximate-inference scheme
        inference = self.inference.lower()
        if inference == "vfe":
            gp_model.inference_method = GPy.inference.latent_function_inference.VarDTC()
        elif inference == "fitc":
            gp_model.inference_method = GPy.inference.latent_function_inference.FITC()
        elif inference == "pep":
            gp_model.inference_method = GPy.inference.latent_function_inference.PEP(
                self.alpha
            )
        else:
            raise ValueError(f"Unrecognized inference method: {self.inference}")
        # first optimization pass
        gp_model.optimize(
            self.optimizer, messages=self.verbose, max_iters=self.max_iters
        )
        # restart the search from a low likelihood variance
        gp_model.Gaussian_noise.variance = 0.01
        # second pass, optionally with random restarts to escape local minima
        if self.n_restarts >= 1:
            gp_model.optimize_restarts(
                num_restarts=self.n_restarts,
                robust=True,
                verbose=self.verbose,
                max_iters=self.max_iters,
            )
        else:
            gp_model.optimize(
                self.optimizer, messages=self.verbose, max_iters=self.max_iters
            )
        self.gp_model = gp_model
        return self

    def display_model(self):
        """Return the fitted GPy model (pretty-prints its parameters)."""
        return self.gp_model

    def _check_X_variance(
        self, X_variance: Union[None, float, np.ndarray], n_dims: int
    ) -> Union[None, np.ndarray]:
        """Validate and expand ``X_variance`` into an input-error covariance.

        Parameters
        ----------
        X_variance : float, None, np.ndarray
            ``None`` disables the correction; a scalar is broadcast; a
            ``(n_dims,)`` vector becomes a diagonal; a ``(n_dims, n_dims)``
            matrix passes through unchanged.
        n_dims : int
            Number of input features.

        Returns
        -------
        X_variance : None or np.ndarray, (n_dims, n_dims)
            The final matrix for the uncertain inputs.
        """
        if X_variance is None:
            return None
        # accept plain ints as well as floats (bool excluded deliberately)
        if isinstance(X_variance, (int, float)) and not isinstance(X_variance, bool):
            return X_variance * np.ones(shape=(n_dims, n_dims))
        if isinstance(X_variance, np.ndarray):
            # BUG FIX: ndarray.shape is a tuple; the original comparisons
            # against the bare ints 1 / n_dims could never be true.
            if X_variance.shape == (1,):
                return X_variance * np.identity(n_dims)
            if X_variance.shape == (n_dims,):
                return np.diag(X_variance)
            if X_variance.shape == (n_dims, n_dims):
                return X_variance
            raise ValueError(
                f"Shape of 'X_variance' ({X_variance.shape}) "
                f"doesn't match X ({n_dims})"
            )
        raise ValueError("Unrecognized type of X_variance.")

    def predict(
        self, X, return_std=False, full_cov=False, noiseless=True, linearized=True
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Predict with the sparse GP model.

        Parameters
        ----------
        X : np.ndarray, (n_samples, n_features)
            Input vector to be predicted
        return_std : bool, default=False
            flag to return the standard deviation of the outputs
        full_cov : bool, default=False
            flag controlling the shape of the linearized correction.
            NOTE(review): the GPy predict call here always returns per-point
            variances, so no full covariance matrix is ever returned.
        noiseless : bool, default=True
            if True, predict the latent function (no likelihood noise)
        linearized : bool, default=True
            if True and X_variance was given, add the first-order
            input-error correction to the predictive variance

        Returns
        -------
        y_mean : np.ndarray, (n_samples, n_targets)
            The mean predictions for the outputs. Returned alone when
            return_std is False.
        y_std : np.ndarray, (n_samples, 1)
            The standard deviations for the outputs.
        """
        X = check_array(X, ensure_2d=True, dtype="numeric")
        if noiseless:
            mean, var = self.gp_model.predict_noiseless(X)
        else:
            mean, var = self.gp_model.predict(X)
        # undo the target normalization applied in fit
        if self.normalize_y:
            mean = self._y_train_std * mean + self._y_train_mean
            var = var * self._y_train_std ** 2
        if not return_std:
            return mean
        # first-order (linearized) correction for uncertain inputs
        if linearized and self.X_variance is not None:
            var_add = self._variance_correction(X)
            if not full_cov:
                # keep only the per-point variances
                var_add = np.diag(var_add)[:, None]
            var = var + var_add
        return mean, np.sqrt(var)

    def _variance_correction(self, X: np.ndarray) -> np.ndarray:
        """First-order propagation of the input-error covariance.

        Forms J @ X_variance @ J.T where J is the Jacobian of the
        predictive mean returned by GPy.

        Parameters
        ----------
        X : np.ndarray, (n_samples, n_features)

        Returns
        -------
        var_add : np.ndarray, (n_samples, n_samples)
        """
        gradients, _ = self.gp_model.predictive_gradients(X)
        jac = gradients[..., 0]
        return jac @ self.X_variance @ jac.T
class UncertainSGPRegressor(BaseEstimator, RegressorMixin):
    """Sparse GP regression that feeds the input noise directly to GPy.

    Unlike SparseGPRegressor, the input uncertainty is handled natively by
    GPy (via the ``X_variance`` argument of the model), not by a linearized
    post-hoc correction.

    Parameters
    ----------
    kernel : GPy.kern.Kern, default=None
        The kernel function. Default kernel is the RBF kernel.
    n_inducing : int, default=10
        The number of inducing inputs to use for the regression model.
    inference : str, default='vfe'
        option to choose inference algorithm
        * vfe - approximates the posterior (default)
        * fitc - approximates the model
    X_variance : float, np.ndarray, default=None
        Option to do uncertain inputs.
    max_iters : int, default=200
        Maximum number of iterations to use for the optimizer.
    optimizer : str, default='scg'
        The optimizer to use for the maximum log-likelihood optimization.
    n_restarts : int, default=10
        Number of random restarts for the optimizer. Good for avoiding
        local minima
    verbose : int, default=None
        Option to display messages during optimization
    normalize_y : bool, default=False
        Option to normalize the outputs before the optimization. Will do
        the reverse transformation for predictions.
    batch_size : int, default=None
        If given, train a minibatch Bayesian GPLVM model instead of the
        full sparse GP.

    Attributes
    ----------
    X_variance : np.ndarray, (samples, features)
        Per-point input-error variances handed to GPy.
    gp_model : GPy.core
        The trained GP model.
    _y_train_mean : np.ndarray, (targets,)
        Per-target training mean (set only when normalize_y=True).
    _y_train_std : np.ndarray, (targets,)
        Per-target training std (set only when normalize_y=True).

    Examples
    --------
    >> from sklearn.datasets import make_friedman2
    >> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >> gp_clf = UncertainSGPRegressor(verbose=None, n_restarts=0, X_variance=2.0)
    >> gp_clf.fit(X, y)
    >> ymean, ystd = gp_clf.predict(Xtest, return_std=True, noiseless=False)
    """

    def __init__(
        self,
        kernel: Optional[GPy.kern.Kern] = None,
        inference: str = "vfe",
        X_variance: Optional[Union[float, np.ndarray]] = None,
        n_inducing: int = 10,
        max_iters: int = 200,
        optimizer: str = "scg",
        n_restarts: int = 10,
        verbose: Optional[int] = None,
        normalize_y: bool = False,
        batch_size: Optional[int] = None,
    ):
        # sklearn convention: store arguments untouched; validate in fit()
        self.kernel = kernel
        self.n_inducing = n_inducing
        self.X_variance = X_variance
        self.inference = inference
        self.max_iters = max_iters
        self.optimizer = optimizer
        self.n_restarts = n_restarts
        self.verbose = verbose
        self.normalize_y = normalize_y
        self.batch_size = batch_size

    def fit(self, X, y):
        """Fit the Sparse GP regression model.

        Parameters
        ----------
        X : np.ndarray, (samples, features)
            Input vectors for the training regime
        y : np.ndarray, (samples, targets)
            Labels for the training regime

        Returns
        -------
        self : returns an instance of self
        """
        # Validate inputs (coerces to 2D numeric arrays)
        X, y = check_X_y(
            X, y, multi_output=True, y_numeric=True, ensure_2d=True, dtype="numeric"
        )
        # normalize outputs to zero mean / unit variance
        if self.normalize_y:
            self._y_train_mean = np.mean(y, axis=0)
            self._y_train_std = np.std(y, axis=0)
            y = (y - self._y_train_mean) / self._y_train_std
        n_samples, d_dimensions = X.shape
        # default Kernel: RBF with a shared lengthscale
        if self.kernel is None:
            self.kernel = GPy.kern.RBF(input_dim=d_dimensions, ARD=False)
        # inducing inputs initialized at k-means centroids
        z = kmeans2(X, self.n_inducing, minit="points")[0]
        # expand the input-noise spec to per-point (n_samples, n_features)
        X_variance = self._check_X_variance(self.X_variance, X.shape)
        # uncertain inputs require the variational bound, so VarDTC takes
        # precedence whenever X_variance is given
        if self.inference.lower() == "vfe" or X_variance is not None:
            inference_method = GPy.inference.latent_function_inference.VarDTC()
        elif self.inference.lower() == "fitc":
            inference_method = GPy.inference.latent_function_inference.FITC()
        else:
            raise ValueError(f"Unrecognized inference method: {self.inference}")
        if self.batch_size is None:
            # NOTE(review): inference_method is not forwarded to this model,
            # so SparseGPRegression picks its own - confirm intent.
            gp_model = GPy.models.SparseGPRegression(
                X, y, kernel=self.kernel, Z=z, X_variance=X_variance
            )
        else:
            # NOTE(review): input_dim=X.shape passes a tuple where GPy
            # typically expects an int (the dimensionality) - confirm.
            gp_model = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(
                Y=y,
                X=X,
                input_dim=X.shape,
                kernel=self.kernel,
                Z=z,
                X_variance=X_variance,
                inference_method=inference_method,
                batchsize=self.batch_size,
                likelihood=GPy.likelihoods.Gaussian(),
                stochastic=False,
                missing_data=False,
            )
        # first optimization pass
        gp_model.optimize(
            self.optimizer, messages=self.verbose, max_iters=self.max_iters
        )
        # restart the search from a low likelihood variance
        gp_model.Gaussian_noise.variance = 0.01
        # second pass, optionally with random restarts to escape local minima
        if self.n_restarts >= 1:
            gp_model.optimize_restarts(
                num_restarts=self.n_restarts,
                robust=True,
                verbose=self.verbose,
                max_iters=self.max_iters,
            )
        else:
            gp_model.optimize(
                self.optimizer, messages=self.verbose, max_iters=self.max_iters
            )
        self.gp_model = gp_model
        return self

    def _check_X_variance(
        self, X_variance: Union[None, float, np.ndarray], X_shape: Tuple[int, int]
    ) -> Union[None, np.ndarray]:
        """Validate and expand ``X_variance`` to a per-point variance array.

        Parameters
        ----------
        X_variance : float, None, np.ndarray
            ``None`` disables uncertain inputs; a scalar is broadcast; a
            ``(n_features,)`` vector is tiled across samples; an array
            matching ``X_shape`` passes through unchanged.
        X_shape : tuple, (n_samples, n_features)
            Shape of the training inputs.

        Returns
        -------
        X_variance : None or np.ndarray, (n_samples, n_features)
        """
        if X_variance is None:
            return None
        # accept plain ints as well as floats (bool excluded deliberately)
        if isinstance(X_variance, (int, float)) and not isinstance(X_variance, bool):
            return X_variance * np.ones(shape=X_shape)
        if isinstance(X_variance, np.ndarray):
            # BUG FIX: ndarray.shape is a tuple; the original `shape == 1`
            # compared a tuple to an int and was never true.
            if X_variance.shape == (1,):
                return X_variance * np.ones(shape=X_shape)
            # BUG FIX: restrict the per-feature branch to 1-D arrays (a 2-D
            # array used to be tiled into a nonsense shape) and use the
            # validated argument rather than self.X_variance.
            if X_variance.ndim == 1 and X_variance.shape[0] == X_shape[1]:
                return np.tile(X_variance, (X_shape[0], 1))
            if X_variance.shape == (X_shape[0], X_shape[1]):
                return X_variance
            raise ValueError(
                f"Shape of 'X_variance' {X_variance.shape} "
                f"doesn't match X {X_shape}"
            )
        raise ValueError("Unrecognized type of X_variance.")

    def display_model(self):
        """Return the fitted GPy model (pretty-prints its parameters)."""
        return self.gp_model

    def predict(
        self, X, return_std=False, full_cov=False, noiseless=True,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Predict with the uncertain-input sparse GP model.

        Parameters
        ----------
        X : np.ndarray, (n_samples, n_features)
            Input vector to be predicted
        return_std : bool, default=False
            flag to return the standard deviation of the outputs
        full_cov : bool, default=False
            currently unused; kept for interface compatibility with the
            other regressors in this module
        noiseless : bool, default=True
            if True, exclude the likelihood noise from the prediction

        Returns
        -------
        y_mean : np.ndarray, (n_samples, n_targets)
            The mean predictions for the outputs. Returned alone when
            return_std is False.
        y_std : np.ndarray, (n_samples, 1)
            The standard deviations for the outputs.
        """
        X = check_array(X, ensure_2d=True, dtype="numeric")
        # `in (True, False)` uses ==, matching the original accept set
        # (so 0/1 are still allowed)
        if noiseless not in (True, False):
            raise ValueError(f"Unrecognized argument for noiseless: {noiseless}")
        mean, var = self.gp_model.predict(X, include_likelihood=not noiseless)
        # undo the target normalization applied in fit
        if self.normalize_y:
            mean = self._y_train_std * mean + self._y_train_mean
            var = var * self._y_train_std ** 2
        if return_std:
            return mean, np.sqrt(var)
        return mean

    def _variance_correction(self, X: np.ndarray) -> np.ndarray:
        """First-order propagation of the input-error covariance.

        Kept for interface parity with the other regressors; ``predict``
        does not call it here because GPy handles the input noise natively.

        Parameters
        ----------
        X : np.ndarray, (n_samples, n_features)

        Returns
        -------
        var_add : np.ndarray, (n_samples, n_samples)
        """
        gradients, _ = self.gp_model.predictive_gradients(X)
        jac = gradients[..., 0]
        return jac @ self.X_variance @ jac.T
def demo_linearized_gpr():
    """Visual demo: fit the three regressors on a noisy sine and plot the
    predictive mean +/- 2 std for each.

    Runs GPRegressor, SparseGPRegressor and UncertainSGPRegressor on the
    same 1D dataset with noisy inputs and noisy targets.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib

    # matplotlib.use("Agg")
    rng = np.random.RandomState(0)
    # Generate sample data
    noise = 1.0
    input_noise = 0.2
    n_train = 1_000
    n_test = 1_000
    n_inducing = 100
    batch_size = None
    X = 15 * rng.rand(n_train, 1)

    def plot_results(title=None):
        # Plot data, true function and the current (y_gpr, y_std) prediction
        plt.figure(figsize=(10, 5))
        lw = 2
        plt.scatter(X, y, c="k", label="data")
        plt.plot(X_plot, np.sin(X_plot), color="navy", lw=lw, label="True")
        plt.plot(X_plot, y_gpr, color="darkorange", lw=lw, label="GPR")
        plt.fill_between(
            X_plot[:, 0],
            (y_gpr - 2 * y_std).squeeze(),
            (y_gpr + 2 * y_std).squeeze(),
            color="darkorange",
            alpha=0.2,
        )
        plt.xlabel("data")
        plt.ylabel("target")
        plt.xlim(0, 20)
        plt.ylim(-4, 4)
        if title is not None:
            plt.title(title)
        plt.legend(loc="best", scatterpoints=1, prop={"size": 8})
        plt.show()

    def f(x):
        return np.sin(x)

    y = f(X)
    # corrupt both inputs and targets
    X += input_noise * rng.randn(X.shape[0], X.shape[1])
    y += noise * (0.5 - rng.rand(X.shape[0], X.shape[1]))  # add noise
    X_plot = np.linspace(0, 20, n_test)[:, None]
    X_plot += input_noise * rng.randn(X_plot.shape[0], X_plot.shape[1])
    X_plot = np.sort(X_plot, axis=0)
    X_variance = input_noise
    n_restarts = 0
    verbose = 1
    normalize_y = False
    max_iters = 500
    # ==================================
    # Standard GPR
    # ==================================
    gpr_clf = GPRegressor(
        verbose=verbose,
        n_restarts=n_restarts,
        X_variance=X_variance,
        normalize_y=normalize_y,
    )
    gpr_clf.fit(X, y)
    y_gpr, y_std = gpr_clf.predict(
        X_plot, return_std=True, noiseless=False, linearized=False
    )
    print(gpr_clf.display_model())
    plot_results("GPR")
    y_gpr, y_std = gpr_clf.predict(
        X_plot, return_std=True, noiseless=False, linearized=True
    )
    print(gpr_clf.display_model())
    plot_results("GPR")
    # ==================================
    # Sparse GPR
    # ==================================
    gpr_clf = SparseGPRegressor(
        verbose=verbose,
        n_restarts=n_restarts,
        X_variance=X_variance,
        normalize_y=normalize_y,
        max_iters=max_iters,
        n_inducing=n_inducing,
    )
    gpr_clf.fit(X, y)
    y_gpr, y_std = gpr_clf.predict(
        X_plot, return_std=True, noiseless=False, linearized=False
    )
    print(gpr_clf.display_model())
    plot_results("SGPR")
    y_gpr, y_std = gpr_clf.predict(
        X_plot, return_std=True, noiseless=False, linearized=True
    )
    print(gpr_clf.display_model())
    plot_results("SGPR")
    # ==================================
    # Uncertain Sparse GPR
    # ==================================
    gpr_clf = UncertainSGPRegressor(
        verbose=verbose,
        n_restarts=n_restarts,
        X_variance=X_variance,
        normalize_y=normalize_y,
        max_iters=max_iters,
        n_inducing=n_inducing,
        batch_size=batch_size,
    )
    gpr_clf.fit(X, y)
    # BUG FIX: UncertainSGPRegressor.predict has no `linearized` parameter;
    # passing it raised TypeError.
    y_gpr, y_std = gpr_clf.predict(X_plot, return_std=True, noiseless=False)
    print(gpr_clf.display_model())
    plot_results("SVGPR")
    y_gpr, y_std = gpr_clf.predict(X_plot, return_std=True, noiseless=False,)
    print(gpr_clf.display_model())
    plot_results("SVGPR")
    return None
# Run the visual demo when executed as a script (no-op when imported).
if __name__ == "__main__":
    demo_linearized_gpr()
| 31.161922
| 88
| 0.577428
| 4,190
| 35,026
| 4.649881
| 0.078282
| 0.054971
| 0.011805
| 0.015706
| 0.893805
| 0.885695
| 0.87661
| 0.874814
| 0.862803
| 0.843094
| 0
| 0.009176
| 0.327928
| 35,026
| 1,123
| 89
| 31.189671
| 0.818479
| 0
| 0
| 0.696833
| 0
| 0
| 0.034112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.022624
| null | null | 0.013575
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5372bc06cef8d0564438077d1a4a79f1f3894232
| 186
|
py
|
Python
|
formating_num2.py
|
pavaniuriti2000/python_files
|
89e3c6bd95af6756b49bc8ee1fa74f900ea6c681
|
[
"MIT"
] | null | null | null |
formating_num2.py
|
pavaniuriti2000/python_files
|
89e3c6bd95af6756b49bc8ee1fa74f900ea6c681
|
[
"MIT"
] | null | null | null |
formating_num2.py
|
pavaniuriti2000/python_files
|
89e3c6bd95af6756b49bc8ee1fa74f900ea6c681
|
[
"MIT"
] | null | null | null |
# Formatting numbers, part 2: zero-padding and fixed-point precision demos.
for value in (1000, 100, 10, 1):
    print("%04d" % value)  # zero-pad to width 4: 1000, 0100, 0010, 0001
print("%.2f" % 6)  # 6.00
print("%.4f" % 6)  # 6.0000
print("%2f" % 6666)  # min-width 2, default 6 decimals: 6666.000000
| 18.6
| 29
| 0.483871
| 25
| 186
| 3.6
| 0.52
| 0.355556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 0.268817
| 186
| 9
| 30
| 20.666667
| 0.426471
| 0.123656
| 0
| 0
| 0
| 0
| 0.167702
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
5388c57ee0fe0171dcc06ead230a7ecb54660d38
| 5,457
|
py
|
Python
|
gotti/modules/helper_funcs/msg_types.py
|
HellBringerReal/HellYeah
|
c204de5e8212fd32aaae6afd92c2bc7999457d4f
|
[
"MIT"
] | null | null | null |
gotti/modules/helper_funcs/msg_types.py
|
HellBringerReal/HellYeah
|
c204de5e8212fd32aaae6afd92c2bc7999457d4f
|
[
"MIT"
] | null | null | null |
gotti/modules/helper_funcs/msg_types.py
|
HellBringerReal/HellYeah
|
c204de5e8212fd32aaae6afd92c2bc7999457d4f
|
[
"MIT"
] | null | null | null |
from enum import IntEnum, unique
from telegram import Message
from gotti.modules.helper_funcs.string_handling import button_markdown_parser
# Message content categories, encoded as stable small ints (order matters:
# the numeric values are part of the persisted/handled data format).
Types = unique(
    IntEnum(
        "Types",
        [
            "TEXT",
            "BUTTON_TEXT",
            "STICKER",
            "DOCUMENT",
            "PHOTO",
            "AUDIO",
            "VOICE",
            "VIDEO",
            "VIDEO_NOTE",
        ],
        start=0,
    )
)
def get_note_type(msg: Message):
    """Extract a note's name, text, type, media content and buttons from *msg*.

    Returns
    -------
    (note_name, text, data_type, content, buttons)
        data_type/content stay None when no recognizable content is found.
    """
    data_type = None
    content = None
    text = ""
    buttons = []
    raw_text = msg.text or msg.caption
    # maxsplit=2 keeps everything after the note name as one chunk
    args = raw_text.split(None, 2)
    note_name = args[1]
    # determine what the contents of the filter are - text, image, sticker, etc
    if len(args) >= 3:
        # note body supplied inline after the command + notename;
        # offset is relative to the full message text (hence negative)
        offset = len(args[2]) - len(raw_text)
        text, buttons = button_markdown_parser(
            args[2],
            entities=msg.parse_entities() or msg.parse_caption_entities(),
            offset=offset,
        )
        data_type = Types.BUTTON_TEXT if buttons else Types.TEXT
    elif msg.reply_to_message:
        reply = msg.reply_to_message
        entities = reply.parse_entities() or reply.parse_caption_entities()
        msgtext = reply.text or reply.caption
        if len(args) >= 2 and reply.text:  # not caption, text
            text, buttons = button_markdown_parser(msgtext, entities=entities)
            data_type = Types.BUTTON_TEXT if buttons else Types.TEXT
        elif reply.sticker:
            # stickers have no caption to parse
            content = reply.sticker.file_id
            data_type = Types.STICKER
        else:
            # media types whose caption is parsed for markdown/buttons;
            # order matches the original precedence
            for attr, media_type in (
                ("document", Types.DOCUMENT),
                ("photo", Types.PHOTO),
                ("audio", Types.AUDIO),
                ("voice", Types.VOICE),
                ("video", Types.VIDEO),
                ("video_note", Types.VIDEO_NOTE),
            ):
                media = getattr(reply, attr)
                if media:
                    # photo is a list of sizes; the last entry is best quality
                    content = media[-1].file_id if attr == "photo" else media.file_id
                    text, buttons = button_markdown_parser(msgtext, entities=entities)
                    data_type = media_type
                    break
    return note_name, text, data_type, content, buttons
# NOTE: consider passing the parsed argument string in explicitly instead of re-splitting msg.text here.
def get_welcome_type(msg: Message):
    """Work out the welcome-message content type from *msg*.

    Like get_note_type, but there is no name argument (maxsplit=1) and media
    captions are taken verbatim instead of being markdown-parsed.

    Returns
    -------
    (text, data_type, content, buttons)
        data_type/content stay None when nothing recognizable is found.
    """
    data_type = None
    content = None
    text = ""
    # maxsplit=1: everything after the command is the welcome text
    args = msg.text.split(None, 1)
    buttons = []
    # determine what the contents of the filter are - text, image, sticker, etc
    # some media cannot have captions in the Telegram BOT API
    if len(args) >= 2 and not msg.reply_to_message:
        # offset of the welcome text relative to the full message (negative)
        offset = len(args[1]) - len(msg.text)
        text, buttons = button_markdown_parser(args[1], entities=msg.parse_entities(), offset=offset)
        data_type = Types.BUTTON_TEXT if buttons else Types.TEXT
    elif msg.reply_to_message and msg.reply_to_message.sticker:
        content = msg.reply_to_message.sticker.file_id
        text = msg.reply_to_message.caption
        data_type = Types.STICKER
    elif msg.reply_to_message and msg.reply_to_message.document:
        content = msg.reply_to_message.document.file_id
        text = msg.reply_to_message.caption
        data_type = Types.DOCUMENT
    elif msg.reply_to_message and msg.reply_to_message.photo:
        # photo is a list of sizes; the last entry is best quality
        content = msg.reply_to_message.photo[-1].file_id
        text = msg.reply_to_message.caption
        data_type = Types.PHOTO
    elif msg.reply_to_message and msg.reply_to_message.audio:
        content = msg.reply_to_message.audio.file_id
        text = msg.reply_to_message.caption
        data_type = Types.AUDIO
    elif msg.reply_to_message and msg.reply_to_message.voice:
        content = msg.reply_to_message.voice.file_id
        text = msg.reply_to_message.caption
        data_type = Types.VOICE
    elif msg.reply_to_message and msg.reply_to_message.video:
        content = msg.reply_to_message.video.file_id
        text = msg.reply_to_message.caption
        data_type = Types.VIDEO
    # BUG FIX: guard reply_to_message like every other branch; previously this
    # dereferenced None when the command had neither args nor a reply.
    elif msg.reply_to_message and msg.reply_to_message.video_note:
        msgtext = args[1] if len(args) > 1 else ""
        content = msg.reply_to_message.video_note.file_id
        text, buttons = button_markdown_parser(msgtext, entities=msg.reply_to_message.parse_caption_entities(), offset=0)
        data_type = Types.VIDEO_NOTE
    return text, data_type, content, buttons
| 37.634483
| 121
| 0.660436
| 731
| 5,457
| 4.674419
| 0.135431
| 0.112379
| 0.140474
| 0.238806
| 0.831724
| 0.788411
| 0.76851
| 0.746854
| 0.731051
| 0.646766
| 0
| 0.005976
| 0.264065
| 5,457
| 144
| 122
| 37.895833
| 0.844871
| 0.08906
| 0
| 0.548673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017699
| false
| 0
| 0.026549
| 0
| 0.150442
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53912269f166a544646f3d4f70195bbcff903dd5
| 35,487
|
py
|
Python
|
sdk/python/pulumi_aws/cognito/identity_pool.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/cognito/identity_pool.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/cognito/identity_pool.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['IdentityPoolArgs', 'IdentityPool']
# NOTE: generated by the Pulumi Terraform Bridge (tfgen); the
# @pulumi.input_type decorator introspects this exact structure
# (keyword-only __init__ + paired @property getters/setters), so the
# shape of this class must not be hand-edited.
@pulumi.input_type
class IdentityPoolArgs:
    def __init__(__self__, *,
                 identity_pool_name: pulumi.Input[str],
                 allow_classic_flow: Optional[pulumi.Input[bool]] = None,
                 allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
                 cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input['IdentityPoolCognitoIdentityProviderArgs']]]] = None,
                 developer_provider_name: Optional[pulumi.Input[str]] = None,
                 openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a IdentityPool resource.

        :param pulumi.Input[str] identity_pool_name: The Cognito Identity Pool name.
        :param pulumi.Input[bool] allow_classic_flow: Enables or disables the classic / basic authentication flow. Default is `false`.
        :param pulumi.Input[bool] allow_unauthenticated_identities: Whether the identity pool supports unauthenticated logins or not.
        :param pulumi.Input[Sequence[pulumi.Input['IdentityPoolCognitoIdentityProviderArgs']]] cognito_identity_providers: An array of Amazon Cognito Identity user pools and their client IDs.
        :param pulumi.Input[str] developer_provider_name: The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
               backend and the Cognito service to communicate about the developer provider.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] openid_connect_provider_arns: Set of OpendID Connect provider ARNs.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] saml_provider_arns: An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] supported_login_providers: Key-Value pairs mapping provider names to provider app IDs.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the Identity Pool. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        # identity_pool_name is the only required property; it is always stored.
        pulumi.set(__self__, "identity_pool_name", identity_pool_name)
        # Optional properties are only recorded when explicitly supplied, so
        # that pulumi.get() can distinguish "unset" from a provided value.
        if allow_classic_flow is not None:
            pulumi.set(__self__, "allow_classic_flow", allow_classic_flow)
        if allow_unauthenticated_identities is not None:
            pulumi.set(__self__, "allow_unauthenticated_identities", allow_unauthenticated_identities)
        if cognito_identity_providers is not None:
            pulumi.set(__self__, "cognito_identity_providers", cognito_identity_providers)
        if developer_provider_name is not None:
            pulumi.set(__self__, "developer_provider_name", developer_provider_name)
        if openid_connect_provider_arns is not None:
            pulumi.set(__self__, "openid_connect_provider_arns", openid_connect_provider_arns)
        if saml_provider_arns is not None:
            pulumi.set(__self__, "saml_provider_arns", saml_provider_arns)
        if supported_login_providers is not None:
            pulumi.set(__self__, "supported_login_providers", supported_login_providers)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    # Each property below maps the snake_case Python attribute to the
    # camelCase wire name expected by the Pulumi engine via
    # @pulumi.getter(name=...); getters/setters delegate to pulumi.get/set.
    @property
    @pulumi.getter(name="identityPoolName")
    def identity_pool_name(self) -> pulumi.Input[str]:
        """
        The Cognito Identity Pool name.
        """
        return pulumi.get(self, "identity_pool_name")

    @identity_pool_name.setter
    def identity_pool_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "identity_pool_name", value)

    @property
    @pulumi.getter(name="allowClassicFlow")
    def allow_classic_flow(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables or disables the classic / basic authentication flow. Default is `false`.
        """
        return pulumi.get(self, "allow_classic_flow")

    @allow_classic_flow.setter
    def allow_classic_flow(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_classic_flow", value)

    @property
    @pulumi.getter(name="allowUnauthenticatedIdentities")
    def allow_unauthenticated_identities(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the identity pool supports unauthenticated logins or not.
        """
        return pulumi.get(self, "allow_unauthenticated_identities")

    @allow_unauthenticated_identities.setter
    def allow_unauthenticated_identities(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_unauthenticated_identities", value)

    @property
    @pulumi.getter(name="cognitoIdentityProviders")
    def cognito_identity_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IdentityPoolCognitoIdentityProviderArgs']]]]:
        """
        An array of Amazon Cognito Identity user pools and their client IDs.
        """
        return pulumi.get(self, "cognito_identity_providers")

    @cognito_identity_providers.setter
    def cognito_identity_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IdentityPoolCognitoIdentityProviderArgs']]]]):
        pulumi.set(self, "cognito_identity_providers", value)

    @property
    @pulumi.getter(name="developerProviderName")
    def developer_provider_name(self) -> Optional[pulumi.Input[str]]:
        """
        The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
        backend and the Cognito service to communicate about the developer provider.
        """
        return pulumi.get(self, "developer_provider_name")

    @developer_provider_name.setter
    def developer_provider_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "developer_provider_name", value)

    @property
    @pulumi.getter(name="openidConnectProviderArns")
    def openid_connect_provider_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set of OpendID Connect provider ARNs.
        """
        return pulumi.get(self, "openid_connect_provider_arns")

    @openid_connect_provider_arns.setter
    def openid_connect_provider_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "openid_connect_provider_arns", value)

    @property
    @pulumi.getter(name="samlProviderArns")
    def saml_provider_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
        """
        return pulumi.get(self, "saml_provider_arns")

    @saml_provider_arns.setter
    def saml_provider_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "saml_provider_arns", value)

    @property
    @pulumi.getter(name="supportedLoginProviders")
    def supported_login_providers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-Value pairs mapping provider names to provider app IDs.
        """
        return pulumi.get(self, "supported_login_providers")

    @supported_login_providers.setter
    def supported_login_providers(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "supported_login_providers", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the Identity Pool. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
# NOTE: generated state container used by IdentityPool.get() for lookups.
# Unlike IdentityPoolArgs it has no required properties and additionally
# carries the output-only fields `arn` and `tags_all`.
@pulumi.input_type
class _IdentityPoolState:
    def __init__(__self__, *,
                 allow_classic_flow: Optional[pulumi.Input[bool]] = None,
                 allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
                 arn: Optional[pulumi.Input[str]] = None,
                 cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input['IdentityPoolCognitoIdentityProviderArgs']]]] = None,
                 developer_provider_name: Optional[pulumi.Input[str]] = None,
                 identity_pool_name: Optional[pulumi.Input[str]] = None,
                 openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering IdentityPool resources.

        :param pulumi.Input[bool] allow_classic_flow: Enables or disables the classic / basic authentication flow. Default is `false`.
        :param pulumi.Input[bool] allow_unauthenticated_identities: Whether the identity pool supports unauthenticated logins or not.
        :param pulumi.Input[str] arn: The ARN of the identity pool.
        :param pulumi.Input[Sequence[pulumi.Input['IdentityPoolCognitoIdentityProviderArgs']]] cognito_identity_providers: An array of Amazon Cognito Identity user pools and their client IDs.
        :param pulumi.Input[str] developer_provider_name: The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
               backend and the Cognito service to communicate about the developer provider.
        :param pulumi.Input[str] identity_pool_name: The Cognito Identity Pool name.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] openid_connect_provider_arns: Set of OpendID Connect provider ARNs.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] saml_provider_arns: An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] supported_login_providers: Key-Value pairs mapping provider names to provider app IDs.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the Identity Pool. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        """
        # Only explicitly-provided values are stored so unset state fields
        # remain absent rather than None.
        if allow_classic_flow is not None:
            pulumi.set(__self__, "allow_classic_flow", allow_classic_flow)
        if allow_unauthenticated_identities is not None:
            pulumi.set(__self__, "allow_unauthenticated_identities", allow_unauthenticated_identities)
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if cognito_identity_providers is not None:
            pulumi.set(__self__, "cognito_identity_providers", cognito_identity_providers)
        if developer_provider_name is not None:
            pulumi.set(__self__, "developer_provider_name", developer_provider_name)
        if identity_pool_name is not None:
            pulumi.set(__self__, "identity_pool_name", identity_pool_name)
        if openid_connect_provider_arns is not None:
            pulumi.set(__self__, "openid_connect_provider_arns", openid_connect_provider_arns)
        if saml_provider_arns is not None:
            pulumi.set(__self__, "saml_provider_arns", saml_provider_arns)
        if supported_login_providers is not None:
            pulumi.set(__self__, "supported_login_providers", supported_login_providers)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    # Property accessors: snake_case attribute <-> camelCase engine name,
    # all delegating to pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="allowClassicFlow")
    def allow_classic_flow(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables or disables the classic / basic authentication flow. Default is `false`.
        """
        return pulumi.get(self, "allow_classic_flow")

    @allow_classic_flow.setter
    def allow_classic_flow(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_classic_flow", value)

    @property
    @pulumi.getter(name="allowUnauthenticatedIdentities")
    def allow_unauthenticated_identities(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the identity pool supports unauthenticated logins or not.
        """
        return pulumi.get(self, "allow_unauthenticated_identities")

    @allow_unauthenticated_identities.setter
    def allow_unauthenticated_identities(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_unauthenticated_identities", value)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the identity pool.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter(name="cognitoIdentityProviders")
    def cognito_identity_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IdentityPoolCognitoIdentityProviderArgs']]]]:
        """
        An array of Amazon Cognito Identity user pools and their client IDs.
        """
        return pulumi.get(self, "cognito_identity_providers")

    @cognito_identity_providers.setter
    def cognito_identity_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IdentityPoolCognitoIdentityProviderArgs']]]]):
        pulumi.set(self, "cognito_identity_providers", value)

    @property
    @pulumi.getter(name="developerProviderName")
    def developer_provider_name(self) -> Optional[pulumi.Input[str]]:
        """
        The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
        backend and the Cognito service to communicate about the developer provider.
        """
        return pulumi.get(self, "developer_provider_name")

    @developer_provider_name.setter
    def developer_provider_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "developer_provider_name", value)

    @property
    @pulumi.getter(name="identityPoolName")
    def identity_pool_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Cognito Identity Pool name.
        """
        return pulumi.get(self, "identity_pool_name")

    @identity_pool_name.setter
    def identity_pool_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "identity_pool_name", value)

    @property
    @pulumi.getter(name="openidConnectProviderArns")
    def openid_connect_provider_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set of OpendID Connect provider ARNs.
        """
        return pulumi.get(self, "openid_connect_provider_arns")

    @openid_connect_provider_arns.setter
    def openid_connect_provider_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "openid_connect_provider_arns", value)

    @property
    @pulumi.getter(name="samlProviderArns")
    def saml_provider_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
        """
        return pulumi.get(self, "saml_provider_arns")

    @saml_provider_arns.setter
    def saml_provider_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "saml_provider_arns", value)

    @property
    @pulumi.getter(name="supportedLoginProviders")
    def supported_login_providers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-Value pairs mapping provider names to provider app IDs.
        """
        return pulumi.get(self, "supported_login_providers")

    @supported_login_providers.setter
    def supported_login_providers(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "supported_login_providers", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the Identity Pool. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
class IdentityPool(pulumi.CustomResource):
    # Generated resource class. Construction accepts either keyword
    # arguments (first overload) or a single IdentityPoolArgs object
    # (second overload); the real __init__ dispatches between them via
    # _utilities.get_resource_args_opts and forwards to _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 allow_classic_flow: Optional[pulumi.Input[bool]] = None,
                 allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
                 cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]]] = None,
                 developer_provider_name: Optional[pulumi.Input[str]] = None,
                 identity_pool_name: Optional[pulumi.Input[str]] = None,
                 openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Provides an AWS Cognito Identity Pool.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        default = aws.iam.SamlProvider("default", saml_metadata_document=(lambda path: open(path).read())("saml-metadata.xml"))
        main = aws.cognito.IdentityPool("main",
            identity_pool_name="identity pool",
            allow_unauthenticated_identities=False,
            allow_classic_flow=False,
            cognito_identity_providers=[
                aws.cognito.IdentityPoolCognitoIdentityProviderArgs(
                    client_id="6lhlkkfbfb4q5kpp90urffae",
                    provider_name="cognito-idp.us-east-1.amazonaws.com/us-east-1_Tv0493apJ",
                    server_side_token_check=False,
                ),
                aws.cognito.IdentityPoolCognitoIdentityProviderArgs(
                    client_id="7kodkvfqfb4qfkp39eurffae",
                    provider_name="cognito-idp.us-east-1.amazonaws.com/eu-west-1_Zr231apJu",
                    server_side_token_check=False,
                ),
            ],
            supported_login_providers={
                "graph.facebook.com": "7346241598935552",
                "accounts.google.com": "123456789012.apps.googleusercontent.com",
            },
            saml_provider_arns=[default.arn],
            openid_connect_provider_arns=["arn:aws:iam::123456789012:oidc-provider/id.example.com"])
        ```

        ## Import

        Cognito Identity Pool can be imported using the name, e.g.

        ```sh
        $ pulumi import aws:cognito/identityPool:IdentityPool mypool <identity-pool-id>
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] allow_classic_flow: Enables or disables the classic / basic authentication flow. Default is `false`.
        :param pulumi.Input[bool] allow_unauthenticated_identities: Whether the identity pool supports unauthenticated logins or not.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]] cognito_identity_providers: An array of Amazon Cognito Identity user pools and their client IDs.
        :param pulumi.Input[str] developer_provider_name: The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
               backend and the Cognito service to communicate about the developer provider.
        :param pulumi.Input[str] identity_pool_name: The Cognito Identity Pool name.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] openid_connect_provider_arns: Set of OpendID Connect provider ARNs.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] saml_provider_arns: An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] supported_login_providers: Key-Value pairs mapping provider names to provider app IDs.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the Identity Pool. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: IdentityPoolArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides an AWS Cognito Identity Pool.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        default = aws.iam.SamlProvider("default", saml_metadata_document=(lambda path: open(path).read())("saml-metadata.xml"))
        main = aws.cognito.IdentityPool("main",
            identity_pool_name="identity pool",
            allow_unauthenticated_identities=False,
            allow_classic_flow=False,
            cognito_identity_providers=[
                aws.cognito.IdentityPoolCognitoIdentityProviderArgs(
                    client_id="6lhlkkfbfb4q5kpp90urffae",
                    provider_name="cognito-idp.us-east-1.amazonaws.com/us-east-1_Tv0493apJ",
                    server_side_token_check=False,
                ),
                aws.cognito.IdentityPoolCognitoIdentityProviderArgs(
                    client_id="7kodkvfqfb4qfkp39eurffae",
                    provider_name="cognito-idp.us-east-1.amazonaws.com/eu-west-1_Zr231apJu",
                    server_side_token_check=False,
                ),
            ],
            supported_login_providers={
                "graph.facebook.com": "7346241598935552",
                "accounts.google.com": "123456789012.apps.googleusercontent.com",
            },
            saml_provider_arns=[default.arn],
            openid_connect_provider_arns=["arn:aws:iam::123456789012:oidc-provider/id.example.com"])
        ```

        ## Import

        Cognito Identity Pool can be imported using the name, e.g.

        ```sh
        $ pulumi import aws:cognito/identityPool:IdentityPool mypool <identity-pool-id>
        ```

        :param str resource_name: The name of the resource.
        :param IdentityPoolArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used: if an IdentityPoolArgs was
        # passed, expand it into keyword arguments for _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(IdentityPoolArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       allow_classic_flow: Optional[pulumi.Input[bool]] = None,
                       allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
                       cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]]] = None,
                       developer_provider_name: Optional[pulumi.Input[str]] = None,
                       identity_pool_name: Optional[pulumi.Input[str]] = None,
                       openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        # Normalize and validate resource options.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is set only when rehydrating an existing resource (see
        # get()); in that case __props__ carries the state and no inputs
        # may be supplied here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = IdentityPoolArgs.__new__(IdentityPoolArgs)
            __props__.__dict__["allow_classic_flow"] = allow_classic_flow
            __props__.__dict__["allow_unauthenticated_identities"] = allow_unauthenticated_identities
            __props__.__dict__["cognito_identity_providers"] = cognito_identity_providers
            __props__.__dict__["developer_provider_name"] = developer_provider_name
            # identity_pool_name is required unless an existing URN is supplied.
            if identity_pool_name is None and not opts.urn:
                raise TypeError("Missing required property 'identity_pool_name'")
            __props__.__dict__["identity_pool_name"] = identity_pool_name
            __props__.__dict__["openid_connect_provider_arns"] = openid_connect_provider_arns
            __props__.__dict__["saml_provider_arns"] = saml_provider_arns
            __props__.__dict__["supported_login_providers"] = supported_login_providers
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["arn"] = None
            __props__.__dict__["tags_all"] = None
        super(IdentityPool, __self__).__init__(
            'aws:cognito/identityPool:IdentityPool',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            allow_classic_flow: Optional[pulumi.Input[bool]] = None,
            allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
            arn: Optional[pulumi.Input[str]] = None,
            cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]]] = None,
            developer_provider_name: Optional[pulumi.Input[str]] = None,
            identity_pool_name: Optional[pulumi.Input[str]] = None,
            openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'IdentityPool':
        """
        Get an existing IdentityPool resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] allow_classic_flow: Enables or disables the classic / basic authentication flow. Default is `false`.
        :param pulumi.Input[bool] allow_unauthenticated_identities: Whether the identity pool supports unauthenticated logins or not.
        :param pulumi.Input[str] arn: The ARN of the identity pool.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]] cognito_identity_providers: An array of Amazon Cognito Identity user pools and their client IDs.
        :param pulumi.Input[str] developer_provider_name: The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
               backend and the Cognito service to communicate about the developer provider.
        :param pulumi.Input[str] identity_pool_name: The Cognito Identity Pool name.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] openid_connect_provider_arns: Set of OpendID Connect provider ARNs.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] saml_provider_arns: An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] supported_login_providers: Key-Value pairs mapping provider names to provider app IDs.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the Identity Pool. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        """
        # Setting opts.id triggers the "existing resource" path in
        # _internal_init, which consumes __props__ as the resource state.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _IdentityPoolState.__new__(_IdentityPoolState)
        __props__.__dict__["allow_classic_flow"] = allow_classic_flow
        __props__.__dict__["allow_unauthenticated_identities"] = allow_unauthenticated_identities
        __props__.__dict__["arn"] = arn
        __props__.__dict__["cognito_identity_providers"] = cognito_identity_providers
        __props__.__dict__["developer_provider_name"] = developer_provider_name
        __props__.__dict__["identity_pool_name"] = identity_pool_name
        __props__.__dict__["openid_connect_provider_arns"] = openid_connect_provider_arns
        __props__.__dict__["saml_provider_arns"] = saml_provider_arns
        __props__.__dict__["supported_login_providers"] = supported_login_providers
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        return IdentityPool(resource_name, opts=opts, __props__=__props__)

    # Read-only output properties resolved by the Pulumi engine.
    @property
    @pulumi.getter(name="allowClassicFlow")
    def allow_classic_flow(self) -> pulumi.Output[Optional[bool]]:
        """
        Enables or disables the classic / basic authentication flow. Default is `false`.
        """
        return pulumi.get(self, "allow_classic_flow")

    @property
    @pulumi.getter(name="allowUnauthenticatedIdentities")
    def allow_unauthenticated_identities(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether the identity pool supports unauthenticated logins or not.
        """
        return pulumi.get(self, "allow_unauthenticated_identities")

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The ARN of the identity pool.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="cognitoIdentityProviders")
    def cognito_identity_providers(self) -> pulumi.Output[Optional[Sequence['outputs.IdentityPoolCognitoIdentityProvider']]]:
        """
        An array of Amazon Cognito Identity user pools and their client IDs.
        """
        return pulumi.get(self, "cognito_identity_providers")

    @property
    @pulumi.getter(name="developerProviderName")
    def developer_provider_name(self) -> pulumi.Output[Optional[str]]:
        """
        The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
        backend and the Cognito service to communicate about the developer provider.
        """
        return pulumi.get(self, "developer_provider_name")

    @property
    @pulumi.getter(name="identityPoolName")
    def identity_pool_name(self) -> pulumi.Output[str]:
        """
        The Cognito Identity Pool name.
        """
        return pulumi.get(self, "identity_pool_name")

    @property
    @pulumi.getter(name="openidConnectProviderArns")
    def openid_connect_provider_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        Set of OpendID Connect provider ARNs.
        """
        return pulumi.get(self, "openid_connect_provider_arns")

    @property
    @pulumi.getter(name="samlProviderArns")
    def saml_provider_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
        """
        return pulumi.get(self, "saml_provider_arns")

    @property
    @pulumi.getter(name="supportedLoginProviders")
    def supported_login_providers(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Key-Value pairs mapping provider names to provider app IDs.
        """
        return pulumi.get(self, "supported_login_providers")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags to assign to the Identity Pool. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")
| 53.124251
| 263
| 0.688816
| 4,107
| 35,487
| 5.704651
| 0.062089
| 0.095779
| 0.054975
| 0.041615
| 0.919971
| 0.90866
| 0.904051
| 0.89577
| 0.891203
| 0.885655
| 0
| 0.004342
| 0.214783
| 35,487
| 667
| 264
| 53.203898
| 0.836462
| 0.352552
| 0
| 0.777465
| 1
| 0
| 0.141487
| 0.094766
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16338
| false
| 0.002817
| 0.019718
| 0
| 0.28169
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
53beb00a25870391efaa7ab2c128b493ad51db68
| 84,939
|
py
|
Python
|
gpMgmt/bin/gppylib/operations/test/unit/test_unit_persistent_rebuild.py
|
nurikk/gpdb
|
04fe0202c59721826d1eda2b19d73e5572893fcb
|
[
"PostgreSQL",
"Apache-2.0"
] | 1
|
2020-01-23T04:09:39.000Z
|
2020-01-23T04:09:39.000Z
|
gpMgmt/bin/gppylib/operations/test/unit/test_unit_persistent_rebuild.py
|
chrishajas/gpdb
|
564b9235a46e2ead1650b753b2d070796cced6f6
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/operations/test/unit/test_unit_persistent_rebuild.py
|
chrishajas/gpdb
|
564b9235a46e2ead1650b753b2d070796cced6f6
|
[
"PostgreSQL",
"Apache-2.0"
] | 1
|
2020-11-17T09:03:53.000Z
|
2020-11-17T09:03:53.000Z
|
#!/usr/bin/env python
#
# Copyright (c) Pivotal Inc 2014. All Rights Reserved.
#
import os
import re
import shutil
import unittest2 as unittest
from collections import defaultdict
from gppylib.gpversion import GpVersion
from gppylib.commands.base import Command, CommandResult, ExecutionError
from mock import patch, MagicMock, Mock, mock_open
from gppylib.operations.persistent_rebuild import ValidateContentID, DbIdInfo, GetDbIdInfo, BackupPersistentTableFiles,\
RebuildTable, RebuildPersistentTables, ValidatePersistentBackup,\
RunBackupRestore, ValidateMD5Sum
remove_per_db_pt_entry = False
remove_global_pt_entry = False
def pt_query_side_effect(*args, **kwargs):
    """Canned side effect for a mocked dbconn.execSQL.

    args[1] is expected to be the SQL text.  Returns fixed catalog rows; the
    module-level flags remove_per_db_pt_entry / remove_global_pt_entry make
    one relfilenode disappear from the per-database or global result set so
    tests can exercise the "missing persistent table entry" paths.  Any query
    other than the two recognized ones is answered as the global PT query.
    """
    all_databases_sql = """select oid, datname from pg_database"""
    per_db_pt_sql = """SELECT relfilenode FROM pg_class WHERE oid IN (5094, 5095)"""
    global_pt_sql = """SELECT relfilenode FROM pg_class WHERE oid IN (5090, 5091, 5092, 5093)"""
    sql = args[1]
    if sql == all_databases_sql:
        return [[123, 'db1']]
    if sql == per_db_pt_sql:
        if remove_per_db_pt_entry:
            return [[5095]]
        return [[5094], [5095]]
    # Fall-through: treated as the global persistent-table files query.
    if remove_global_pt_entry:
        return [[5091], [5092], [5093]]
    return [[5090], [5091], [5092], [5093]]
class ValidateContentIDTestCase(unittest.TestCase):
    """Unit tests for ValidateContentID.

    Covers the three validation paths: parsing a comma-separated content-id
    string, reading content ids from a file (the builtin open() is mocked so
    no real file is touched), and checking requested ids against a mocked
    gparray segment configuration.
    """

    def setUp(self):
        # A bare validator; each test fills in only the attributes it needs.
        self.contentid_validator = ValidateContentID(content_id=None,
                                                     contentid_file=None,
                                                     gparray=None)

    def _mock_gparray(self, mock_segs):
        """Return a mock gparray whose getDbList() yields *mock_segs*."""
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        return gparray

    @patch('os.path.isfile', return_value=True)
    def test_validate_contentid_file_with_valid_content_ids(self, mock1):
        """Newline-separated integer ids are returned as a list of ints."""
        expected = [1, 2, 3]
        file_contents = '1\n2\n3'
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        # Fake the (python2) builtin open() so iterating the "file" yields
        # one id per line.
        m = MagicMock()
        m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
        with patch('__builtin__.open', m, create=True):
            self.assertEqual(expected, self.contentid_validator._validate_contentid_file())

    @patch('os.path.isfile', return_value=True)
    def test_validate_contentid_file_with_spaces_content_ids(self, mock1):
        """Ids padded with whitespace are still parsed as integers."""
        expected = [1, 2, 3]
        file_contents = ' 1\n2 \n3 \n'
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        m = MagicMock()
        m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
        with patch('__builtin__.open', m, create=True):
            self.assertEqual(expected, self.contentid_validator._validate_contentid_file())

    @patch('os.path.isfile', return_value=True)
    def test_validate_contentid_file_with_invalid_content_ids(self, mock1):
        """A non-integer token in the file raises with the offending value."""
        file_contents = '1\nb\n3'
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        m = MagicMock()
        m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
        with patch('__builtin__.open', m, create=True):
            with self.assertRaisesRegexp(Exception, 'Found non integer content id "b" in contentid file "/tmp/contentid_file"'):
                self.contentid_validator._validate_contentid_file()

    @patch('os.path.isfile', return_value=True)
    def test_validate_contentid_file_with_empty_file(self, mock1):
        """An empty contentid file raises."""
        file_contents = ''
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        m = MagicMock()
        m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
        with patch('__builtin__.open', m, create=True):
            with self.assertRaisesRegexp(Exception, 'Please make sure there is atleast one integer content ID in the file'):
                self.contentid_validator._validate_contentid_file()

    @patch('os.path.isfile', return_value=False)
    def test_validate_contentid_file_with_non_existent_file(self, mock1):
        """A missing contentid file raises before any read is attempted."""
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        with self.assertRaisesRegexp(Exception, 'Unable to find contentid file "/tmp/contentid_file"'):
            self.contentid_validator._validate_contentid_file()

    @patch('os.path.isfile', return_value=True)
    def test_validate_contentid_file_with_blank_lines(self, mock1):
        """Blank lines between ids are ignored."""
        expected = [1, 2]
        file_contents = '1\n\n\n2'
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        m = MagicMock()
        m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
        with patch('__builtin__.open', m, create=True):
            self.assertEqual(expected, self.contentid_validator._validate_contentid_file())

    @patch('os.path.isfile', return_value=True)
    def test_validate_contentid_file_with_negative_integers(self, mock1):
        """Negative ids (e.g. the master, -1) are accepted."""
        expected = [-1, 2]
        file_contents = '-1\n2'
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        m = MagicMock()
        m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
        with patch('__builtin__.open', m, create=True):
            self.assertEqual(expected, self.contentid_validator._validate_contentid_file())

    def test_validate_content_id_with_valid_segments(self):
        """Requested ids present in gp_segment_configuration validate cleanly."""
        expected = [1, 2, 3]
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId = Mock()
            m.getSegmentContentId.return_value = (i % 3) + 1
            mock_segs.append(m)
        self.contentid_validator.gparray = self._mock_gparray(mock_segs)
        self.contentid_validator.content_id = [1, 2, 3]
        self.assertEqual(expected, self.contentid_validator._validate_content_id())

    def test_validate_content_id_with_invalid_segments(self):
        """Ids absent from the configuration raise, listing the missing ids."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId = Mock()
            m.getSegmentContentId.return_value = i + 10
            mock_segs.append(m)
        self.contentid_validator.gparray = self._mock_gparray(mock_segs)
        self.contentid_validator.content_id = [1, 2, 3]
        with self.assertRaisesRegexp(Exception, 'The following content ids are not present in gp_segment_configuration: 1, 2, 3'):
            self.contentid_validator._validate_content_id()

    def test_validate_content_id_with_primary_segment_down(self):
        """A down ('d') segment does not block validation (no exception)."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId = Mock()
            m.getSegmentContentId.return_value = (i % 3) + 1
            if i == 1:
                m.getSegmentStatus = Mock()
                m.getSegmentStatus.return_value = 'd'
            mock_segs.append(m)
        self.contentid_validator.gparray = self._mock_gparray(mock_segs)
        self.contentid_validator.content_id = [1, 2, 3]
        # Should complete without raising.
        self.contentid_validator._validate_content_id()

    def test_validate_content_id_with_resync(self):
        """A requested content id in resync ('r') mode raises."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = (i % 3) + 1
            m.getSegmentStatus.return_value = 'u'
            if i == 1:
                m.getSegmentMode.return_value = 'r'
            else:
                m.getSegmentMode.return_value = 's'
            mock_segs.append(m)
        self.contentid_validator.gparray = self._mock_gparray(mock_segs)
        self.contentid_validator.content_id = [1, 2, 3]
        with self.assertRaisesRegexp(Exception, 'Can not rebuild persistent tables for content ids that are in resync mode'):
            self.contentid_validator._validate_content_id()

    @patch('gppylib.operations.persistent_rebuild.ask_yesno', return_value=False)
    def test_validate_content_id_with_some_others_resync(self, mock1):
        """Other contents in resync prompt the user; answering no aborts."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = (i % 3) + 1
            m.getSegmentStatus.return_value = 'u'
            if m.getSegmentContentId.return_value in (1, 2):
                m.getSegmentMode.return_value = 'r'
            else:
                m.getSegmentMode.return_value = 's'
            mock_segs.append(m)
        self.contentid_validator.gparray = self._mock_gparray(mock_segs)
        self.contentid_validator.content_id = [3]
        with self.assertRaisesRegexp(Exception, 'Aborting rebuild due to user request'):
            self.contentid_validator._validate_content_id()

    def test_validate_content_id_with_change_tracking_segments(self):
        """A change-tracking ('c') segment does not block validation."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId = Mock()
            m.getSegmentContentId.return_value = (i % 3) + 1
            if i == 1:
                m.getSegmentStatus = Mock()
                m.getSegmentStatus.return_value = 'c'
            mock_segs.append(m)
        self.contentid_validator.gparray = self._mock_gparray(mock_segs)
        self.contentid_validator.content_id = [1, 2, 3]
        self.assertEqual([1, 2, 3], self.contentid_validator._validate_content_id())

    def test_parse_content_id(self):
        """A comma-separated list parses to a list of ints."""
        self.contentid_validator.content_id = '1, 2, 3'
        self.assertEqual([1, 2, 3], self.contentid_validator._parse_content_id())

    def test_parse_content_id_valid_single_content_id(self):
        """A single (negative) id parses to a one-element list."""
        self.contentid_validator.content_id = '-1'
        self.assertEqual([-1], self.contentid_validator._parse_content_id())

    def test_parse_content_id_invalid_comma_separated_list(self):
        """Trailing/empty entries in the list raise."""
        self.contentid_validator.content_id = '1, 2, 3,,'
        with self.assertRaisesRegexp(Exception, 'Some content ids are not integers:'):
            self.contentid_validator._parse_content_id()

    def test_parse_content_id_invalid_integers(self):
        """Non-integer entries in the list raise."""
        self.contentid_validator.content_id = '1, 2, a, x,'
        with self.assertRaisesRegexp(Exception, 'Some content ids are not integers:'):
            self.contentid_validator._parse_content_id()

    @patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_content_id', return_value=[1, 2, 3])
    def test_validate_with_only_content_id(self, mock1):
        """validate() delegates to _validate_content_id when no file is given."""
        self.contentid_validator.content_id = '1, 2, 3'
        self.contentid_validator.contentid_file = None
        self.assertEqual([1, 2, 3], self.contentid_validator.validate())

    @patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_content_id', side_effect=Exception('ERROR'))
    def test_validate_with_only_content_id_with_error(self, mock1):
        """Errors from _validate_content_id propagate out of validate()."""
        self.contentid_validator.content_id = '1, 2, 3'
        self.contentid_validator.contentid_file = None
        with self.assertRaisesRegexp(Exception, 'ERROR'):
            self.contentid_validator.validate()

    @patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_contentid_file', return_value=[1, 2, 3])
    @patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_content_id', return_value=[1, 2, 3])
    def test_validate_with_only_content_id_file(self, mock1, mock2):
        """validate() reads ids from the file, then validates them."""
        self.contentid_validator.contentid_file = '/tmp/f1'
        self.contentid_validator.content_id = None
        self.assertEqual([1, 2, 3], self.contentid_validator.validate())

    @patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_contentid_file', side_effect=Exception('ERROR'))
    def test_validate_with_only_content_id_file_with_error(self, mock1):
        """Errors from _validate_contentid_file propagate out of validate()."""
        self.contentid_validator.contentid_file = '/tmp/f1'
        self.contentid_validator.content_id = None
        with self.assertRaisesRegexp(Exception, 'ERROR'):
            self.contentid_validator.validate()
class GetDbIdInfoTestCase(unittest.TestCase):
    """Unit tests for GetDbIdInfo.

    All database access (dbconn.DbURL/connect/execSQL) is patched out, so the
    tests only exercise how query rows are folded into the filespace ->
    tablespace and tablespace -> database-oid maps and into DbIdInfo tuples.
    """
    def setUp(self):
        # Bare instance; each test injects its own gparray / content_id.
        self.dbid_info = GetDbIdInfo(gparray=None, content_id=None)
    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', return_value=[(1000, '2000'), (1001, '2001 2002')])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_filespace_to_tablespace_map(self, mock1, mock2, mock3):
        """Rows of (fsoid, 'ts ts ...') become {fsoid: [ts, ...]} with int oids."""
        m = Mock()
        m.getSegmentFilespaces.return_value = {1000: '/tmp/fs1', 1001: '/tmp/fs2'}
        self.assertEqual({1000: [2000], 1001: [2001, 2002]}, self.dbid_info._get_filespace_to_tablespace_map(m))
    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', return_value=[])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_filespace_to_tablespace_map_empty_filespaces(self, mock1, mock2, mock3):
        """No filespaces on the segment yields an empty map."""
        m = Mock()
        m.getSegmentFilespaces.return_value = {}
        self.assertEqual({}, self.dbid_info._get_filespace_to_tablespace_map(m))
    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', return_value=[(1000, '2000'), (1001, '2001 2002')])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_tablespace_to_dboid_map(self, mock1, mock2, mock3):
        """Rows of (tsoid, 'dboid dboid ...') become {tsoid: [dboid, ...]}."""
        ts_oids = [1000, 1001]
        self.assertEqual({1000: [2000], 1001: [2001, 2002]}, self.dbid_info._get_tablespace_to_dboid_map(ts_oids))
    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', return_value=[])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_tablespace_to_dboid_map_empty_tablespaces(self, mock1, mock2, mock3):
        """No tablespace oids yields an empty map."""
        ts_oids = []
        self.assertEqual({}, self.dbid_info._get_tablespace_to_dboid_map(ts_oids))
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_no_matching_content_id(self, mock1, mock2):
        """Requested content ids not in the cluster produce an empty result."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'u'
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [11, 12]
        expected = []
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={1000: [2000, 2002], 1001: [2001, 2003]})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map',
           return_value={2000: [12345], 2001: [2345, 4567], 2002: [8765, 4634], 2003: [3456]})
    def test_get_info_with_single_matching_content_id(self, mock1, mock2):
        """One matching content id yields one fully-populated DbIdInfo."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {1000: [2000, 2002], 1001: [2001, 2003]},
                             {2000: [12345], 2001: [2345, 4567], 2002: [8765, 4634], 2003: [3456]})]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_no_filespaces(self, mock1, mock2):
        """A matching segment with no filespaces yields empty maps in DbIdInfo."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {}
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {}, {}, {})]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_no_tablespaces(self, mock1, mock2):
        """Filespaces but no tablespaces: filespace map kept, ts/dboid maps empty."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {})]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_down_segments(self, mock1, mock2):
        """A down segment elsewhere does not affect the matching content's info."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'd' if i == 3 else 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {})]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_segment_in_ct(self, mock1, mock2):
        """A change-tracking segment elsewhere does not affect the result."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'c' if i == 3 else 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {})]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_content_down(self, mock1, mock2):
        """Even with segment i==0 marked down, the matching primary is reported."""
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'd' if i == 3 or i == 0 else 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {})]
        self.assertEqual(expected, self.dbid_info.get_info())
class BackupPersistentTableFilesTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Create the on-disk fixtures used by the copy/backup tests.

        /tmp/p1 plays the role of a user filespace (tablespace 2000, db 123)
        and /tmp/p2 the default data directory (base/234, global, xlog/clog/
        distributedlog).  A pre-made pt_rebuild_bk_<timestamp> mirror of the
        same layout is also created so restore paths have a backup to read.
        """
        # create persistent table files under new filespace/tablespace/database,
        # and also the default filespace, tablespace/database
        # timestamp: 20140604101010
        try:
            # source files
            os.makedirs(os.path.join('/tmp/p1', '2000', '123'))
            os.makedirs(os.path.join('/tmp/p2', 'base', '234'))
            os.makedirs(os.path.join('/tmp/p2', 'global'))
            os.makedirs(os.path.join('/tmp/p2', 'pg_xlog'))
            os.makedirs(os.path.join('/tmp/p2', 'pg_clog'))
            os.makedirs(os.path.join('/tmp/p2', 'pg_distributedlog'))
            os.makedirs(os.path.join('/tmp/p1', 'empty'))
            open('/tmp/p1/2000/123/5094', 'w').close()
            open('/tmp/p1/2000/123/5094.1', 'w').close()
            open('/tmp/p1/2000/123/5095', 'w').close()
            open('/tmp/p2/base/234/5094', 'w').close()
            open('/tmp/p2/base/234/5095', 'w').close()
            open('/tmp/p2/global/pg_control', 'w').close()
            open('/tmp/p2/global/5090', 'w').close()
            open('/tmp/p2/global/5091', 'w').close()
            open('/tmp/p2/global/5092', 'w').close()
            open('/tmp/p2/global/5093', 'w').close()
            open('/tmp/p2/pg_xlog/0000', 'w').close()
            open('/tmp/p2/pg_clog/0000', 'w').close()
            open('/tmp/p2/pg_distributedlog/000', 'w').close()
            # Backup files
            os.makedirs(os.path.join('/tmp/p1', 'pt_rebuild_bk_20140604101010','2000', '123'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'base', '234'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'global'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'pg_xlog'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'pg_clog'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'pg_distributedlog'))
            open('/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5094', 'w').close()
            open('/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5094.1', 'w').close()
            open('/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5095', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/base/234/5094', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/base/234/5095', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/pg_control', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/5090', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/5091', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/5092', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/5093', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/pg_xlog/0000', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/pg_clog/0000', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/pg_distributedlog/000', 'w').close()
        except OSError:
            # Deliberate best-effort: fixtures may survive from a prior run.
            pass
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree('/tmp/p1')
shutil.rmtree('/tmp/p2')
except Exception:
pass
    def setUp(self):
        """Build a BackupPersistentTableFiles with canned relfilenode maps.

        perdb_pt_filenames is keyed by dbid, then by database oid; the
        timestamp matches the pre-made pt_rebuild_bk_20140604101010 fixtures.
        """
        self.backup_persistent_files = BackupPersistentTableFiles(dbid_info=None,
                                                                  perdb_pt_filenames={2:{17088L:['5094', '5095'],1L: [5094L, 5095L]},
                                                                                      3:{17088L:['5094', '5095'],1L: [5094L, 5095L]}},
                                                                  global_pt_filenames={2: ['5090', '5091', '5092', '5093'],
                                                                                       3: ['5090', '5091', '5092', '5093']},
                                                                  timestamp='20140604101010')
    @patch('os.makedirs')
    def test_copy_files(self, mock1):
        """Backup copy succeeds when src and dst md5 maps agree (no exception)."""
        src_ptfiles = ['/tmp/global/5090', '/tmp/global/5091']
        dst_ptfiles = ['/tmp1/global/5090', '/tmp1/global/5091']
        self.backup_persistent_files.pool = Mock()
        content = -1
        actionType = 'backup'
        m = Mock()
        m.validate.return_value = {'/tmp/global/5090': 'abdfe', '/tmp/global/5091': 'abdfe',
                                   '/tmp1/global/5090': 'abdfe', '/tmp1/global/5091': 'abdfe'}
        self.backup_persistent_files.md5_validator = m
        self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    @patch('gppylib.operations.persistent_rebuild.Command.run')
    def test_copy_files_with_restore(self, mock1, mock2):
        """Restore-direction copy (Command.run patched) with matching md5 sums."""
        src_ptfiles = ['/tmp/global/5090', '/tmp/global/5091']
        dst_ptfiles = ['/tmp1/global/5090', '/tmp1/global/5091']
        self.backup_persistent_files.pool = Mock()
        m = Mock()
        content = -1
        actionType = 'restore'
        m.validate.return_value = {'/tmp/global/5090': 'abdfe', '/tmp/global/5091': 'abdfe',
                                   '/tmp1/global/5090': 'abdfe', '/tmp1/global/5091': 'abdfe'}
        self.backup_persistent_files.md5_validator = m
        self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    def test_copy_files_without_errors_with_no_files(self, mock1):
        """Copying an empty file list is a no-op: empty md5 maps on both sides."""
        src_ptfiles = []
        dst_ptfiles = []
        self.backup_persistent_files.pool = Mock()
        m = Mock()
        content = -1
        actionType = 'backup'
        m.validate.side_effect = [{}, {}]
        self.backup_persistent_files.md5_validator = m
        self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    @patch('gppylib.operations.persistent_rebuild.Command.run')
    def test_copy_files_without_errors_with_no_files_with_restore(self, mock1, mock2):
        """Restore with an empty file list is a no-op as well."""
        src_ptfiles = []
        dst_ptfiles = []
        self.backup_persistent_files.pool = Mock()
        m = Mock()
        content = -1
        actionType = 'restore'
        m.validate.side_effect = [{}, {}]
        self.backup_persistent_files.md5_validator = m
        self.backup_persistent_files.restore=True
        self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    def test_copy_files_with_md5_mismatch(self, mock1):
        """Differing src/dst md5 sums raise, naming expected vs actual values."""
        src_ptfiles = ['/tmp/global/5090', '/tmp/global/5091']
        dst_ptfiles = ['/tmp1/global/5090', '/tmp1/global/5091']
        self.backup_persistent_files.pool = Mock()
        m = Mock()
        content = -1
        actionType = 'backup'
        m.validate.return_value = {'/tmp/global/5090': 'asdfads', '/tmp/global/5091': 'abdfe',
                                   '/tmp1/global/5090': 'asdfadsf', '/tmp1/global/5091': 'abdfe'}
        self.backup_persistent_files.md5_validator = m
        with self.assertRaisesRegexp(Exception, 'MD5 sums do not match! Expected md5 = "{\'/tmp/global/5090\': \'asdfads\'}",\
but actual md5 = "{\'/tmp1/global/5090\': \'asdfadsf\'}"'):
            self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.validate', return_value={'5090': 'sdfadsf', '5091': 'sdfadsf'})
    def test_copy_files_with_errors(self, mock1, mock2):
        """An ExecutionError from the worker pool's check_results propagates."""
        src_ptfiles = ['/tmp/global/5090', '/tmp/global/5091']
        dst_ptfiles = ['/tmp1/global/5090', '/tmp1/global/5091']
        m = Mock()
        content = -1
        actionType = 'backup'
        m.check_results.side_effect = ExecutionError('Error !!!', Mock())
        # The same mock serves as both the pool and the md5 validator here.
        self.backup_persistent_files.pool = m
        m.validate.return_value = {'5090': 'sdfadsf', '5091': 'sdfadsf'}
        self.backup_persistent_files.md5_validator = m
        with self.assertRaisesRegexp(ExecutionError, 'Error !!!'):
            self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
def test_build_PT_src_dest_pairs_filelist_None(self):
src_dir = ''
dest_dir = ''
file_list = None
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_filelist_Empty(self):
src_dir = ''
dest_dir = ''
file_list = []
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_non_exist_src_dir(self):
src_dir = 'tmp'
dest_dir = '/tmp'
file_list = ['5090']
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_empty_src_dir(self):
src_dir = '/tmp/p1/empty'
dest_dir = '/tmp/p1/empty'
file_list = ['5090']
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_with_file_missed(self):
src_dir = '/tmp/p1/'
dest_dir = '/tmp/p1/'
file_list = ['5555']
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_with_extended_file_exist(self):
src_dir = '/tmp/p1/2000/123'
dest_dir = '/tmp/p1/pt_rebuild_bk_20140604101010/2000/123'
file_list = ['5094']
src_files = ['/tmp/p1/2000/123/5094', '/tmp/p1/2000/123/5094.1']
dest_files = ['/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5094', '/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5094.1']
self.assertEqual((src_files, dest_files), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_global_pt_files(self, mock1):
        """Global PT backup over two dbids (copy mocked) completes, returning None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_global_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_PT_src_dest_pairs', return_value=[None, None])
    def test_copy_global_pt_files_with_restore_with_failure(self, mock1):
        """During restore, missing global source files raise."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Missing global persistent files from source directory.'):
            self.backup_persistent_files._copy_global_pt_files(restore=True)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_PT_src_dest_pairs', return_value=[None, None])
    def test_copy_global_pt_files_without_restore_with_failure(self, mock1):
        """During backup, missing global source files raise the same error."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Missing global persistent files from source directory.'):
            self.backup_persistent_files._copy_global_pt_files()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files',
           side_effect=[Mock(), Exception('Error while backing up files')])
    def test_copy_global_pt_files_with_errors(self, mock1):
        """A copy failure on the second dbid is wrapped as a backup failure."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Backup of global persistent files failed'):
            self.backup_persistent_files._copy_global_pt_files()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_global_pt_files_without_errors(self, mock1):
        """Successful global PT backup returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_global_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_global_pt_files_with_restore_without_errors(self, mock1):
        """Successful global PT restore returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_global_pt_files(restore=True))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files',
           side_effect=[Mock(), Exception('Error while backing up files')])
    def test_copy_global_pt_files_with_restore_with_errors(self, mock1):
        """A copy failure during restore is wrapped as a restore failure."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Restore of global persistent files failed'):
            self.backup_persistent_files._copy_global_pt_files(restore=True)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_per_db_pt_files(self, mock1):
        """Per-database PT backup over two dbids (copy mocked) returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_PT_src_dest_pairs', return_value=[None, None])
    def test_copy_per_db_pt_files_with_restore_with_failure(self, mock1):
        """During restore, missing per-database source files raise."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Missing per-database persistent files from source directory.'):
            self.backup_persistent_files._copy_per_db_pt_files(restore=True)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_PT_src_dest_pairs', return_value=[None, None])
    def test_copy_per_db_pt_files_without_restore_with_failure(self, mock1):
        """Backup direction raises the same missing-files error when no src/dest pairs are built."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Missing per-database persistent files from source directory.'):
            self.backup_persistent_files._copy_per_db_pt_files()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files',
           side_effect=[Mock(), Exception('Error while backing up files')])
    def test_copy_per_db_pt_files_with_errors(self, mock1):
        """A _copy_files failure during backup is wrapped as a per-database backup failure."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Backup of per database persistent files failed'):
            self.backup_persistent_files._copy_per_db_pt_files()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_per_db_pt_files_without_errors(self, mock1):
        """Happy-path backup of per-database PT files returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_per_db_pt_files_with_unused_filespace(self, mock1):
        """Filespace 1000 has no tablespaces here (ts map only lists 3052); copy still succeeds."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_per_db_pt_files_with_unused_tablespace(self, mock1):
        """Tablespace 2000 has no database files here (file map only lists 2001); copy still succeeds."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_per_db_pt_files_with_restore_without_errors(self, mock1):
        """Happy-path restore of per-database PT files returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files(restore=True))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_Xactlog_files_without_restore_without_errors(self, mock1):
        """Happy-path backup of transaction-log files returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_Xactlog_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_Xactlog_src_dest_pairs', return_value=[[],[]])
    def test_copy_Xactlog_files_without_restore_with_failure(self, mock1):
        """Empty src/dest pair lists from build_Xactlog_src_dest_pairs raise during backup."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'should not be empty'):
            self.backup_persistent_files._copy_Xactlog_files()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_Xactlog_src_dest_pairs', return_value=[[],[]])
    def test_copy_Xactlog_files_with_restore_with_failure(self, mock1):
        """Empty src/dest pair lists also raise on the restore path."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'should not be empty'):
            self.backup_persistent_files._copy_Xactlog_files(restore=True)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_Xactlog_files_with_restore_without_errors(self, mock1):
        """Happy-path restore of transaction-log files returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_Xactlog_files(restore=True))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_pg_control_files_without_restore_without_errors(self, mock1):
        """Happy-path backup of the global pg_control file returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_pg_control_file())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_pg_control_files_with_restore_without_errors(self, mock1):
        """Happy-path restore of the global pg_control file returns None."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_pg_control_file(restore=True))
    @patch('os.path.isfile', return_value=False)
    def test_copy_pg_control_files_without_restore_with_failure(self, mock1):
        """With os.path.isfile forced False, backup reports pg_control missing from the source dir."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Global pg_control file is missing from source directory'):
            self.backup_persistent_files._copy_pg_control_file()
    @patch('os.path.isfile', return_value=False)
    def test_copy_pg_control_files_with_restore_with_failure(self, mock1):
        """With os.path.isfile forced False, restore reports pg_control missing from the backup dir."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Global pg_control file is missing from backup directory'):
            self.backup_persistent_files._copy_pg_control_file(restore=True)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files',
           side_effect=[Mock(), Mock(), Mock(), Exception('Error while backing up files')])
    def test_copy_per_db_pt_files_with_restore_with_errors(self, mock1):
        """A _copy_files failure on the fourth copy is wrapped as a per-database restore failure."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Restore of per database persistent files failed'):
            self.backup_persistent_files._copy_per_db_pt_files(restore=True)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.Command')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
    def test_restore_without_errors(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7, mock8):
        """restore() returns None when all four _copy_* phases succeed (all mocked out)."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files.restore())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', side_effect=Exception('Error'))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.Command')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
    def test_restore_with_global_file_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
        """restore() propagates a failure from the global-PT-file phase."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Error'):
            self.backup_persistent_files.restore()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', side_effect=Exception('Error'))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.Command')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
    def test_restore_with_per_db_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
        """restore() propagates a failure from the per-database-PT-file phase."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Error'):
            self.backup_persistent_files.restore()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', side_effect=Exception('Error'))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.Command')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
    def test_restore_with_xlog_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
        """restore() propagates a failure from the transaction-log phase."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Error'):
            self.backup_persistent_files.restore()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', side_effect=Exception('Error'))
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.Command')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
    def test_restore_with_pg_control_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
        """restore() propagates a failure from the pg_control phase."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Error'):
            self.backup_persistent_files.restore()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', side_effect=Exception('Error'))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', side_effect=Exception('Error'))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.Command')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
    def test_restore_with_global_and_per_db_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
        """restore() still raises when both the global and per-database phases fail."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Error'):
            self.backup_persistent_files.restore()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', side_effect=Exception('Error'))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', side_effect=Exception('Error'))
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.Command')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
    def test_restore_with_xlog_and_pg_control_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
        """restore() still raises when both the xlog and pg_control phases fail."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Error'):
            self.backup_persistent_files.restore()
class RebuildTableTestCase(unittest.TestCase):
    """Tests for RebuildTable: segment/dbid validation and the rebuild driver."""
    def setUp(self):
        self.rebuild_table = RebuildTable(dbid_info=None)
    def test_get_valid_dbids(self):
        """Primaries ('p', status 'u') matching the content ids map to their dbids."""
        content_ids = [1, 2]
        expected = [0, 1]
        mock_segs = []
        # Two up primaries with content ids 1 and 2, dbids 0 and 1.
        for i in range(2):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentRole.return_value = 'p'
            m.getSegmentDbId.return_value = i
            m.getSegmentPort.return_value = 5000 + i
            m.getSegmentHostName.return_value = 'mdw%d' % (i + 1)
            m.getSegmentStatus.return_value = 'u'
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_table.gparray = m
        self.assertEqual(expected, self.rebuild_table._get_valid_dbids(content_ids))
    def test_get_valid_dbids_empty_contents(self):
        """An empty content-id list yields an empty dbid list."""
        content_ids = []
        expected = []
        mock_segs = []
        for i in range(2):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentRole.return_value = 'p'
            m.getSegmentDbId.return_value = i
            m.getSegmentPort.return_value = 5000 + i
            m.getSegmentHostName.return_value = 'mdw%d' % (i + 1)
            m.getSegmentStatus.return_value = 'u'
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_table.gparray = m
        self.assertEqual(expected, self.rebuild_table._get_valid_dbids(content_ids))
    def test_get_valid_dbids_non_matching_content_ids(self):
        """Content ids with no matching segment produce no dbids."""
        content_ids = [3, 4, 5]
        expected = []
        mock_segs = []
        for i in range(2):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentRole.return_value = 'p'
            m.getSegmentDbId.return_value = i
            m.getSegmentPort.return_value = 5000 + i
            m.getSegmentHostName.return_value = 'mdw%d' % (i + 1)
            m.getSegmentStatus.return_value = 'u'
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_table.gparray = m
        self.assertEqual(expected, self.rebuild_table._get_valid_dbids(content_ids))
    def test_get_valid_dbids_content_ids_down(self):
        """A down segment (status 'd') aborts the rebuild with an error."""
        content_ids = [1, 2, 3]
        mock_segs = []
        for i in range(2):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentRole.return_value = 'p'
            m.getSegmentDbId.return_value = i
            m.getSegmentPort.return_value = 5000 + i
            m.getSegmentHostName.return_value = 'mdw%d' % (i + 1)
            # First segment (i == 0) is marked down.
            m.getSegmentStatus.return_value = 'u' if i % 2 else 'd'
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_table.gparray = m
        with self.assertRaisesRegexp(Exception, 'Segment .* is down. Cannot continue with persistent table rebuild'):
            self.rebuild_table._get_valid_dbids(content_ids)
    def test_get_valid_dbids_content_ids_resync(self):
        """A segment in resync mode ('r') aborts the rebuild with an error."""
        content_ids = [1, 2, 3]
        mock_segs = []
        for i in range(2):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentRole.return_value = 'p'
            m.getSegmentDbId.return_value = i
            m.getSegmentPort.return_value = 5000 + i
            m.getSegmentHostName.return_value = 'mdw%d' % (i + 1)
            m.getSegmentStatus.return_value = 'u'
            # Second segment (i == 1) is in resync mode.
            m.getSegmentMode.return_value = 'r' if i % 2 else 's'
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_table.gparray = m
        with self.assertRaisesRegexp(Exception, 'Segment .* is in resync. Cannot continue with persistent table rebuild'):
            self.rebuild_table._get_valid_dbids(content_ids)
    @patch('gppylib.operations.persistent_rebuild.ValidatePersistentBackup.validate_backups', return_value=Mock())
    def test_get_valid_dbids_content_ids_are_mirrors(self, mock1):
        """Only the primary's dbid (1) is returned; the mirror triggers backup validation."""
        content_ids = [1, 2, 3]
        expected = [1]
        mock_segs = []
        for i in range(2):
            m = Mock()
            # First segment (i == 0) is a mirror, second a primary.
            m.getSegmentRole.return_value = 'p' if i % 2 else 'm'
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i
            m.getSegmentPort.return_value = 5000 + i
            m.getSegmentHostName.return_value = 'mdw%d' % (i + 1)
            m.getSegmentStatus.return_value = 'u'
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_table.gparray = m
        self.assertEqual(expected, self.rebuild_table._get_valid_dbids(content_ids))
    @patch('gppylib.operations.persistent_rebuild.GpArray')
    @patch('gppylib.operations.persistent_rebuild.RebuildTable._validate_backups')
    @patch('gppylib.operations.persistent_rebuild.RebuildTable._get_valid_dbids', return_value=[1, 2, 3])
    @patch('gppylib.operations.persistent_rebuild.ParallelOperation.run')
    def test_rebuild(self, mock1, mock2, mock3, mock4):
        """rebuild() reports all dbids as successes when remote operations succeed."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.rebuild_table.dbid_info = [d1, d2]
        expected_success = [d1, d2]
        expected_failure = []
        self.assertEqual((expected_success, expected_failure), self.rebuild_table.rebuild())
    @patch('gppylib.operations.persistent_rebuild.GpArray')
    @patch('gppylib.operations.persistent_rebuild.RebuildTable._validate_backups')
    @patch('gppylib.operations.persistent_rebuild.RebuildTable._get_valid_dbids', return_value=[1, 2, 3])
    @patch('gppylib.operations.persistent_rebuild.ParallelOperation.run')
    @patch('gppylib.operations.persistent_rebuild.RemoteOperation.get_ret', side_effect=[Mock(), Exception('Error')])
    def test_rebuild_with_errors(self, mock1, mock2, mock3, mock4, mock5):
        """rebuild() splits results: d1 succeeds, d2's get_ret exception lands in failures."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.rebuild_table.dbid_info = [d1, d2]
        expected_success = [d1]
        expected_failure = [(d2, 'Error')]
        self.assertEqual((expected_success, expected_failure), self.rebuild_table.rebuild())
class ValidatePersistentBackupTestCase(unittest.TestCase):
    """Tests for ValidatePersistentBackup: checking 'find' results for backup directories."""
    def setUp(self):
        self.validate_persistent_backup = ValidatePersistentBackup(dbid_info=None, timestamp='20140605101010')
    def test_process_results(self):
        """Commands that exit 0 and print a found path validate cleanly."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, '/tmp/f1', '', True, False)
        m1.cmdStr = "find /tmp/f1 -name pt_rebuild_bk_"
        m2 = Mock()
        m2.get_results.return_value = CommandResult(0, '/tmp/f1', '', True, False)
        m2.cmdStr = "find /tmp/f1 -name pt_rebuild_bk_"
        m = Mock()
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_persistent_backup._process_results(d1, m)
    def test_process_results_with_errors(self):
        """A non-zero return code from a find command fails the validation."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, '/tmp/f1', '', True, False)
        m1.cmdStr = "find /tmp/f1 -name pt_rebuild_bk_"
        m2 = Mock()
        # rc=1 simulates the find command itself failing.
        m2.get_results.return_value = CommandResult(1, '/tmp/f1', '', True, False)
        m2.cmdStr = "find /tmp/f1 -name pt_rebuild_bk_"
        m = Mock()
        m.getCompletedItems.return_value = [m1, m2]
        with self.assertRaisesRegexp(Exception, 'Failed to validate backups'):
            self.validate_persistent_backup._process_results(d1, m)
    def test_process_results_with_missing_backup(self):
        """rc=0 with empty stdout (no backup dir found) also fails the validation."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, '/tmp/f1', '', True, False)
        m1.cmdStr = "find /tmp/f1 -name pt_rebuild_bk_"
        m2 = Mock()
        m2.get_results.return_value = CommandResult(0, '', '', True, False)
        m2.cmdStr = "find /foo/bar -name pt_rebuild_bk_"
        m = Mock()
        m.getCompletedItems.return_value = [m1, m2]
        with self.assertRaisesRegexp(Exception, 'Failed to validate backups'):
            self.validate_persistent_backup._process_results(d1, m)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    def test_validate(self, mock1):
        """validate_backups() completes when the worker pool is mocked out."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.validate_persistent_backup.dbid_info = [d1, d2]
        self.validate_persistent_backup.validate_backups()
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.ValidatePersistentBackup._process_results', side_effect=Exception('Failed to validate backups'))
    def test_validate_error_in_workerpool(self, mock1, mock2):
        """A _process_results failure propagates out of validate_backups()."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.validate_persistent_backup.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Failed to validate backups'):
            self.validate_persistent_backup.validate_backups()
class RunBackupRestoreTestCase(unittest.TestCase):
    """Tests for RunBackupRestore: host->dbid mapping and backup/restore dispatch."""
    def setUp(self):
        self.run_backup_restore = RunBackupRestore(dbid_info=None, timestamp=None)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results')
    def test_run_backup_restore(self, mock1, mock2):
        """Backup dispatch completes when the pool and result processing are mocked."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        host_to_dbid_info_map = {'h1': [d1], 'h2': [d2]}
        self.run_backup_restore._run_backup_restore(host_to_dbid_info_map)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results', side_effect=Exception('ERROR'))
    def test_run_backup_restore_with_errors(self, mock1, mock2):
        """A _process_results failure propagates out of the backup dispatch."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        host_to_dbid_info_map = {'h1': [d1], 'h2': [d2]}
        with self.assertRaisesRegexp(Exception, 'ERROR'):
            self.run_backup_restore._run_backup_restore(host_to_dbid_info_map)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results')
    def test_run_backup_restore_with_restore(self, mock1, mock2):
        """Restore dispatch (restore=True) completes with mocks in place."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        host_to_dbid_info_map = {'h1': [d1], 'h2': [d2]}
        self.run_backup_restore._run_backup_restore(host_to_dbid_info_map, restore=True)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results', side_effect=Exception('ERROR'))
    def test_run_backup_restore_with_errors_with_restore(self, mock1, mock2):
        """A _process_results failure propagates out of the restore dispatch."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        host_to_dbid_info_map = {'h1': [d1], 'h2': [d2]}
        with self.assertRaisesRegexp(Exception, 'ERROR'):
            self.run_backup_restore._run_backup_restore(host_to_dbid_info_map, restore=True)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results')
    def test_run_backup_restore_with_validate(self, mock1, mock2):
        """Validation dispatch (validate_backups=True) completes with mocks in place."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        host_to_dbid_info_map = {'h1': [d1], 'h2': [d2]}
        self.run_backup_restore._run_backup_restore(host_to_dbid_info_map, validate_backups=True)
    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results', side_effect=Exception('ERROR'))
    def test_run_backup_restore_with_errors_with_validate(self, mock1, mock2):
        """A _process_results failure propagates out of the validation dispatch."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        host_to_dbid_info_map = {'h1': [d1], 'h2': [d2]}
        with self.assertRaisesRegexp(Exception, 'ERROR'):
            self.run_backup_restore._run_backup_restore(host_to_dbid_info_map, validate_backups=True)
    def test_get_host_to_dbid_info_map(self):
        """Each DbIdInfo is grouped under its hostname."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        expected = {'h1': [d1], 'h2': [d2]}
        self.run_backup_restore.dbid_info = [d1, d2]
        self.assertEqual(expected, self.run_backup_restore._get_host_to_dbid_info_map())
    def test_get_host_to_dbid_info_map_empty(self):
        """No dbid_info yields an empty map."""
        self.run_backup_restore.dbid_info = []
        self.assertEqual({}, self.run_backup_restore._get_host_to_dbid_info_map())
    def test_get_host_to_dbid_info_map_multiple_entries_per_host(self):
        """Two DbIdInfos on the same host are collected under one key."""
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        expected = {'h1': [d1, d2]}
        self.run_backup_restore.dbid_info = [d1, d2]
        self.assertEqual(expected, self.run_backup_restore._get_host_to_dbid_info_map())
    def test_process_results(self):
        """Commands that all exit 0 pass result processing.

        Note: the unused DbIdInfo fixture present in the original was removed;
        _process_results only consumes the pool mock and the error string.
        """
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, '/tmp/f1', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(0, '/tmp/f1', '', True, False)
        m = Mock()
        m.getCompletedItems.return_value = [m1, m2]
        self.run_backup_restore._process_results(m, 'ERR')
    def test_process_results_with_errors(self):
        """A non-zero return code raises the supplied error message."""
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, '/tmp/f1', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(1, 'ERR', '', True, False)
        m = Mock()
        m.getCompletedItems.return_value = [m1, m2]
        with self.assertRaisesRegexp(Exception, 'ERR'):
            self.run_backup_restore._process_results(m, 'ERR')
class ValidateMD5SumTestCase(unittest.TestCase):
    """Unit tests for ValidateMD5Sum's platform-specific md5 helpers.

    Each test patches platform.system so that both the Darwin ('md5') and
    Linux ('md5sum') code paths can be exercised on any build machine, and
    so the unsupported-platform error paths are reachable.
    """

    def setUp(self):
        # No worker pool is needed for these unit tests.
        self.validate_md5sum = ValidateMD5Sum(pool=None)

    @patch('platform.system', return_value='Darwin')
    def test_get_md5_prog_for_osx(self, mock1):
        self.assertEqual('md5', self.validate_md5sum._get_md5_prog())

    @patch('platform.system', return_value='Linux')
    def test_get_md5_prog_for_linux(self, mock1):
        self.assertEqual('md5sum', self.validate_md5sum._get_md5_prog())

    @patch('platform.system', return_value='Solaris')
    def test_get_md5_prog_for_invalid_os(self, mock1):
        with self.assertRaisesRegexp(Exception, 'Cannot determine the md5 program since Solaris platform is not supported'):
            self.validate_md5sum._get_md5_prog()

    @patch('platform.system', return_value='Darwin')
    def test_get_md5_results_pat_for_osx(self, mock1):
        # Raw string avoids the invalid '\(' escape warning (W605).
        pat = re.compile(r'MD5 \((.*)\) = (.*)')
        self.assertEqual(pat, self.validate_md5sum._get_md5_results_pat())

    @patch('platform.system', return_value='Linux')
    def test_get_md5_results_pat_for_linux(self, mock1):
        # BUG FIX: this test was previously also named
        # test_get_md5_results_pat_for_osx, which shadowed the Darwin test
        # above so that one never ran. Renamed so both tests execute.
        pat = re.compile('(.*) (.*)')
        self.assertEqual(pat, self.validate_md5sum._get_md5_results_pat())

    @patch('platform.system', return_value='Solaris')
    def test_get_md5_results_pat_for_invalid_os(self, mock1):
        with self.assertRaisesRegexp(Exception, 'Cannot determine the pattern for results of md5 program since Solaris platform is not supported'):
            self.validate_md5sum._get_md5_results_pat()

    @patch('platform.system', return_value='Darwin')
    def test_process_results_on_osx(self, mock1):
        m = Mock()
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, 'MD5 (foo) = afsdfasdf', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(0, 'MD5 (foo1) = sdfadsff', '', True, False)
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_md5sum.pool = m
        self.validate_md5sum.md5_results_pat = re.compile(r'MD5 \((.*)\) = (.*)')
        expected = {'foo': 'afsdfasdf', 'foo1': 'sdfadsff'}
        self.assertEqual(expected, self.validate_md5sum._process_md5_results())

    @patch('platform.system', return_value='Darwin')
    def test_process_results_on_osx_with_error(self, mock1):
        m = Mock()
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, 'MD5 (foo1) = sdfadsff', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(1, '', 'Error', True, False)
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_md5sum.pool = m
        self.validate_md5sum.md5_results_pat = re.compile(r'MD5 \((.*)\) = (.*)')
        with self.assertRaisesRegexp(Exception, 'Unable to calculate md5sum'):
            self.validate_md5sum._process_md5_results()

    @patch('platform.system', return_value='Linux')
    def test_process_results_on_linux(self, mock1):
        m = Mock()
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, 'afsdfasdf foo', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(0, 'sdfadsff foo1', '', True, False)
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_md5sum.pool = m
        self.validate_md5sum.md5_results_pat = re.compile('(.*) (.*)')
        expected = {'foo': 'afsdfasdf', 'foo1': 'sdfadsff'}
        self.assertEqual(expected, self.validate_md5sum._process_md5_results())

    @patch('platform.system', return_value='Linux')
    def test_process_results_on_linux_with_error(self, mock1):
        m = Mock()
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, 'sdfadsff fo1', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(1, '', 'Error', True, False)
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_md5sum.pool = m
        self.validate_md5sum.md5_results_pat = re.compile('(.*) (.*)')
        with self.assertRaisesRegexp(Exception, 'Unable to calculate md5sum'):
            self.validate_md5sum._process_md5_results()
class RebuildPersistentTableTestCase(unittest.TestCase):
    """Unit tests for RebuildPersistentTables.

    Covers the platform/version precondition checks, mirror/standby
    detection from a mocked gparray, and discovery of persistent-table
    relfilenodes via mocked dbconn calls.
    """

    def setUp(self):
        # None for every constructor argument: the units under test do not
        # read any of them.
        self.rebuild_persistent_table = RebuildPersistentTables(content_id=None,
                                                                contentid_file=None,
                                                                backup=None,
                                                                restore=None,
                                                                batch_size=None,
                                                                backup_dir=None)

    @patch('gppylib.operations.persistent_rebuild.platform.system', return_value='Linux')
    def test_check_platform_linux(self, mock1):
        self.rebuild_persistent_table._check_platform()

    @patch('gppylib.operations.persistent_rebuild.platform.system', return_value='Solaris')
    def test_check_platform_non_linux(self, mock1):
        with self.assertRaisesRegexp(Exception, 'This tool is only supported on Linux and OSX platforms'):
            self.rebuild_persistent_table._check_platform()

    def test_validate_has_mirrors_and_standby(self):
        # Content ids run -1..4; the first three mocked segments are mirrors.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i - 1
            m.isSegmentMirror.return_value = True if i < 3 else False
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_persistent_table.gparray = m
        self.rebuild_persistent_table._validate_has_mirrors_and_standby()
        self.assertTrue(self.rebuild_persistent_table.has_mirrors)

    def test_validate_has_mirrors_and_standby_with_no_mirrors(self):
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i - 1
            m.isSegmentMirror.return_value = False
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_persistent_table.gparray = m
        self.rebuild_persistent_table._validate_has_mirrors_and_standby()
        self.assertFalse(self.rebuild_persistent_table.has_mirrors)

    def test_validate_has_mirrors_and_standby_with_mirrors_for_master(self):
        # NOTE: i == -1 is never true for i in range(6), so no segment is a
        # mirror here; only the standby detection is being exercised.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i - 1
            m.isSegmentMirror.return_value = True if i == -1 else False
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_persistent_table.gparray = m
        self.rebuild_persistent_table._validate_has_mirrors_and_standby()
        self.assertTrue(self.rebuild_persistent_table.has_standby)

    @patch('gppylib.operations.persistent_rebuild.findCmdInPath', return_value=True)
    def test_check_md5_prog(self, mock1):
        self.rebuild_persistent_table._check_md5_prog()

    @patch('gppylib.operations.persistent_rebuild.findCmdInPath', return_value=False)
    def test_check_md5_prog_no_md5(self, mock1):
        with self.assertRaisesRegexp(Exception, 'Unable to find md5.* program. Please make sure it is in PATH'):
            self.rebuild_persistent_table._check_md5_prog()

    @patch('gppylib.operations.persistent_rebuild.GpVersion.local', return_value=GpVersion('4.2.7.3'))
    def test_check_database_version(self, mock1):
        self.rebuild_persistent_table._check_database_version()

    @patch('gppylib.operations.persistent_rebuild.GpVersion.local', return_value=GpVersion('4.0.1.0'))
    def test_check_database_version_with_lower_version(self, mock1):
        with self.assertRaisesRegexp(Exception, 'This tool is not supported on Greenplum version lower than 4.1.0.0'):
            self.rebuild_persistent_table._check_database_version()

    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', side_effect=[[[5090], [5091], [5092], [5093]], [[123, 'template1']],
                                                                                [[5094], [16992]]])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_persistent_table_filenames(self, mock1, mock2, mock3):
        # The three side_effect results are consumed in order: global PT
        # relfilenodes, the database list, then per-database PT relfilenodes.
        d1 = DbIdInfo(2, 'p', 3, 5002, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        self.rebuild_persistent_table.dbid_info = [d1]
        self.rebuild_persistent_table._get_persistent_table_filenames()
        # (Two dead defaultdict initializations were removed here; they were
        # immediately overwritten / never read.)
        expected_files = ['5090', '5091', '5092', '5093']
        expected_dbid = {3: expected_files}
        expected_global = {'h1': expected_dbid}
        exp_pt_files = ['5094', '16992']
        exp_dboid = {123: exp_pt_files}
        exp_dbid = {3: exp_dboid}
        expected_perdb_pt_file = {'h1': exp_dbid}
        from gppylib.operations.persistent_rebuild import GLOBAL_PERSISTENT_FILES, PER_DATABASE_PERSISTENT_FILES
        self.assertEqual(GLOBAL_PERSISTENT_FILES, expected_global)
        self.assertEqual(PER_DATABASE_PERSISTENT_FILES, expected_perdb_pt_file)

    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', side_effect=pt_query_side_effect)
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_persistent_table_filenames_lacking_global_relfilenode(self, mock1, mock2, mock3):
        # remove_global_pt_entry is a module-level flag read by
        # pt_query_side_effect to simulate a missing pg_class entry.
        d1 = DbIdInfo(2, 'p', 3, 5002, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        global remove_global_pt_entry
        remove_global_pt_entry = True
        self.rebuild_persistent_table.dbid_info = [d1]
        # NOTE: 'pesistent' matches the (misspelled) production error message.
        with self.assertRaisesRegexp(Exception, 'Missing relfilenode entry of global pesistent tables in pg_class'):
            self.rebuild_persistent_table._get_persistent_table_filenames()
        remove_global_pt_entry = False

    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', side_effect=pt_query_side_effect)
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_persistent_table_filenames_lacking_per_database_relfilenode(self, mock1, mock2, mock3):
        d1 = DbIdInfo(2, 'p', 3, 5002, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]})
        global remove_per_db_pt_entry
        remove_per_db_pt_entry = True
        self.rebuild_persistent_table.dbid_info = [d1]
        with self.assertRaisesRegexp(Exception, 'Missing relfilenode entry of per database persistent tables in pg_class'):
            self.rebuild_persistent_table._get_persistent_table_filenames()
        remove_per_db_pt_entry = False
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 59.648174
| 151
| 0.656141
| 10,633
| 84,939
| 4.974889
| 0.039312
| 0.052611
| 0.076052
| 0.095769
| 0.922852
| 0.909581
| 0.891508
| 0.872831
| 0.856365
| 0.843472
| 0
| 0.09725
| 0.198696
| 84,939
| 1,423
| 152
| 59.690091
| 0.679954
| 0.003438
| 0
| 0.699764
| 0
| 0
| 0.209483
| 0.131751
| 0
| 0
| 0
| 0
| 0.085106
| 0
| null | null | 0.001576
| 0.00788
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53da282b4ddff21fa04d1ecc38992c8936c48332
| 61,703
|
py
|
Python
|
tests/app/questionnaire/test_navigation.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | null | null | null |
tests/app/questionnaire/test_navigation.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | 1
|
2018-11-05T12:00:51.000Z
|
2018-11-05T12:00:51.000Z
|
tests/app/questionnaire/test_navigation.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | null | null | null |
import uuid
from unittest.mock import MagicMock
from app.data_model.answer_store import AnswerStore, Answer
from app.questionnaire.completeness import Completeness
from app.questionnaire.location import Location
from app.questionnaire.navigation import Navigation
from app.utilities.schema import load_schema_from_params
from tests.app.app_context_test_case import AppContextTestCase
# Linear routing path through the 'test navigation' schema used by tests that
# do not exercise repeating groups: one representative block per navigable
# section, ending at the summary.
standard_routing_path = [
    Location('property-details', 0, 'insurance-type'),
    Location('property-details', 0, 'insurance-address'),
    Location('multiple-questions-group', 0, 'household-composition'),
    Location('extra-cover', 0, 'extra-cover-block'),
    Location('skip-payment-group', 0, 'skip-payment'),
    Location('final-section-routed-group', 0, 'final-interstitial'),
    Location('summary-group', 0, 'summary')
]
# pylint: disable=R0904,C0302
class TestNavigation(AppContextTestCase):
def test_navigation_no_blocks_completed(self):
    """With nothing completed, every section link appears incomplete and
    only the current section ('property-details') is highlighted."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=False)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    navigation = _create_navigation(schema, AnswerStore(), metadata, [], standard_routing_path)
    user_navigation = [
        {
            'link_name': 'Property Details',
            'highlight': True,
            'repeating': False,
            'completed': False,
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'link_name': 'Household Composition',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        {
            'link_name': 'Extra Cover',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata)
        },
        {
            'link_name': 'Payment Details',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata)
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation('property-details', 0), user_navigation)
def test_non_repeating_block_completed(self):
    """Completing the insurance-type block marks Property Details complete,
    and answering 'Contents' reveals the Property Interstitial group."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    answer_store = AnswerStore()
    answer_1 = Answer(
        value='Contents',
        group_instance=0,
        answer_instance=0,
        answer_id='insurance-type-answer'
    )
    answer_store.add(answer_1)
    completed_blocks = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address')
    ]
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    navigation = _create_navigation(schema, answer_store, metadata, completed_blocks, routing_path)
    user_navigation = [
        {
            'link_name': 'Property Details',
            'highlight': True,
            'repeating': False,
            'completed': True,
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'link_name': 'Property Interstitial',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('property-interstitial-group', 0, 'property-interstitial').url(metadata)
        },
        {
            'link_name': 'Household Composition',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        {
            'link_name': 'Extra Cover',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata)
        },
        {
            'link_name': 'Payment Details',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata)
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation('property-details', 0), user_navigation)
def test_navigation_repeating_household_and_hidden_household_groups_completed(self):
    """Repeating household members appear as named links ('Jim', 'Ben') and
    are marked completed once all of their repeating blocks are done."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    completed_blocks = [
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('repeating-group', 1, 'repeating-block-1'),
        Location('repeating-group', 1, 'repeating-block-2')
    ]
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('repeating-group', 1, 'repeating-block-1'),
        Location('repeating-group', 1, 'repeating-block-2'),
        Location('skip-payment-group', 0, 'skip-payment')
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed_blocks, routing_path)
    person1_uuid = uuid.uuid4()
    person2_uuid = uuid.uuid4()
    # Answers are injected directly as dicts so the repeating-group link
    # names can be derived from the two household members' first names.
    navigation.answer_store.answers = [
        {
            'group_instance': 0,
            'answer_instance': 0,
            'answer_id': 'first-name',
            'value': 'Jim',
            'group_instance_id': person1_uuid,
        },
        {
            'group_instance': 0,
            'answer_instance': 1,
            'answer_id': 'first-name',
            'value': 'Ben',
            'group_instance_id': person2_uuid,
        },
        {
            'group_instance': 0,
            'answer_instance': 0,
            'answer_id': 'what-is-your-age',
            'value': None,
            'group_instance_id': person1_uuid,
        },
        {
            'group_instance': 0,
            'answer_instance': 0,
            'answer_id': 'what-is-your-shoe-size',
            'value': None,
            'group_instance_id': person1_uuid,
        },
        {
            'group_instance': 1,
            'answer_instance': 0,
            'answer_id': 'what-is-your-age',
            'value': None,
            'group_instance_id': person2_uuid,
        },
        {
            'group_instance': 1,
            'answer_instance': 0,
            'answer_id': 'what-is-your-shoe-size',
            'value': None,
            'group_instance_id': person2_uuid,
        }
    ]
    user_navigation = [
        {
            'link_name': 'Property Details',
            'repeating': False,
            'completed': False,
            'highlight': True,
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'link_name': 'Household Composition',
            'highlight': False,
            'repeating': False,
            'completed': True,
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        {
            'link_name': 'Jim',
            'repeating': True,
            'completed': True,
            'highlight': False,
            'link_url': Location('repeating-group', 0, 'repeating-block-1').url(metadata)
        },
        {
            'link_name': 'Ben',
            'repeating': True,
            'completed': True,
            'highlight': False,
            'link_url': Location('repeating-group', 1, 'repeating-block-1').url(metadata)
        },
        {
            'link_name': 'Extra Cover',
            'repeating': False,
            'completed': False,
            'highlight': False,
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata)
        },
        {
            'link_name': 'Payment Details',
            'repeating': False,
            'completed': False,
            'highlight': False,
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata)
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation('property-details', 0), user_navigation)
def test_navigation_repeating_group_extra_answered_not_completed(self):
    """An answered extra-cover count adds the Extra Cover Items group, but
    partially answered repeating groups remain marked incomplete."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    completed_blocks = [
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('extra-cover', 0, 'extra-cover-block')
    ]
    answer_store = AnswerStore()
    person1_uuid = uuid.uuid4()
    person2_uuid = uuid.uuid4()
    answer_1 = Answer(
        answer_instance=0,
        answer_id='first-name',
        group_instance=0,
        value='Person1',
        group_instance_id=person1_uuid
    )
    answer_2 = Answer(
        answer_instance=1,
        answer_id='first-name',
        group_instance=0,
        value='Person2',
        group_instance_id=person2_uuid
    )
    answer_3 = Answer(
        answer_instance=1,
        answer_id='extra-cover-answer',
        group_instance=0,
        value=2,
        group_instance_id=person2_uuid
    )
    answer_store.add(answer_1)
    answer_store.add(answer_2)
    answer_store.add(answer_3)
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    navigation = _create_navigation(schema, answer_store, metadata, completed_blocks, routing_path)
    user_navigation = [
        {
            'completed': False,
            'highlight': True,
            'repeating': False,
            'link_name': 'Property Details',
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'link_name': 'Household Composition',
            'highlight': False,
            'repeating': False,
            'completed': True,
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': True,
            'link_name': 'Person1',
            'link_url': Location('repeating-group', 0, 'repeating-block-1').url(metadata),
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': True,
            'link_name': 'Person2',
            'link_url': Location('repeating-group', 1, 'repeating-block-1').url(metadata),
        },
        {
            'completed': True,
            'highlight': False,
            'repeating': False,
            'link_name': 'Extra Cover',
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata),
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': False,
            'link_name': 'Extra Cover Items',
            'link_url': Location('extra-cover-items-group', 0, 'extra-cover-items').url(metadata)
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': False,
            'link_name': 'Payment Details',
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata),
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation('property-details', 0), user_navigation)
def test_navigation_repeating_group_extra_answered_completed(self):
    """When both extra-cover-items repeats are fully answered and completed,
    the Extra Cover and Extra Cover Items links show as completed."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    completed_blocks = [
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('extra-cover-items-group', 0, 'extra-cover-items-radio'),
        Location('extra-cover-items-group', 1, 'extra-cover-items'),
        Location('extra-cover-items-group', 1, 'extra-cover-items-radio'),
    ]
    answer_store = AnswerStore()
    answer_1 = Answer(
        value=2,
        group_instance=0,
        group_instance_id='group-1-0',
        answer_id='extra-cover-answer',
        answer_instance=0
    )
    answer_2 = Answer(
        value='1',
        group_instance=0,
        group_instance_id='group-1-0',
        answer_id='extra-cover-items-answer',
        answer_instance=0
    )
    answer_3 = Answer(
        value='Yes',
        group_instance=0,
        group_instance_id='group-1-0',
        answer_id='extra-cover-items-radio-answer',
        answer_instance=0
    )
    answer_4 = Answer(
        value='2',
        group_instance=1,
        group_instance_id='group-1-1',
        answer_id='extra-cover-items-answer',
        answer_instance=0
    )
    answer_5 = Answer(
        value='Yes',
        group_instance=1,
        group_instance_id='group-1-1',
        answer_id='extra-cover-items-radio-answer',
        answer_instance=0
    )
    answer_store.add(answer_1)
    answer_store.add(answer_2)
    answer_store.add(answer_3)
    answer_store.add(answer_4)
    answer_store.add(answer_5)
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('extra-cover-items-group', 0, 'extra-cover-items-radio'),
        Location('extra-cover-items-group', 1, 'extra-cover-items'),
        Location('extra-cover-items-group', 1, 'extra-cover-items-radio'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    navigation = _create_navigation(schema, answer_store, metadata, completed_blocks, routing_path)
    user_navigation = [
        {
            'repeating': False,
            'highlight': True,
            'completed': False,
            'link_name': 'Property Details',
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'link_name': 'Household Composition',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        {
            'repeating': False,
            'highlight': False,
            'completed': True,
            'link_name': 'Extra Cover',
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata)
        },
        {
            'repeating': False,
            'highlight': False,
            'completed': True,
            'link_name': 'Extra Cover Items',
            'link_url': Location('extra-cover-items-group', 0, 'extra-cover-items').url(metadata)
        },
        {
            'repeating': False,
            'highlight': False,
            'completed': False,
            'link_name': 'Payment Details',
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata)
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation('property-details', 0), user_navigation)
def test_navigation_repeating_group_link_name_format(self):
    """Repeating-group links are named from first and last name combined
    ('Joe Bloggs', 'Jane Doe')."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    completed_blocks = [
        Location('multiple-questions-group', 0, 'household-composition'),
    ]
    answer_store = AnswerStore()
    person1_uuid = uuid.uuid4()
    person2_uuid = uuid.uuid4()
    answer_1 = Answer(
        answer_instance=0,
        answer_id='first-name',
        group_instance=0,
        value='Joe',
        group_instance_id=person1_uuid
    )
    answer_2 = Answer(
        answer_instance=0,
        answer_id='last-name',
        group_instance=0,
        value='Bloggs',
        group_instance_id=person1_uuid
    )
    answer_3 = Answer(
        answer_instance=1,
        answer_id='first-name',
        group_instance=0,
        value='Jane',
        group_instance_id=person2_uuid
    )
    answer_4 = Answer(
        answer_instance=1,
        answer_id='last-name',
        group_instance=0,
        value='Doe',
        group_instance_id=person2_uuid
    )
    answer_store.add(answer_1)
    answer_store.add(answer_2)
    answer_store.add(answer_3)
    answer_store.add(answer_4)
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    navigation = _create_navigation(schema, answer_store, metadata, completed_blocks, routing_path)
    user_navigation = [
        {
            'link_name': 'Property Details',
            'highlight': True,
            'repeating': False,
            'completed': False,
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'repeating': False,
            'completed': True,
            'highlight': False,
            'link_name': 'Household Composition',
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        {
            'repeating': True,
            'link_name': 'Joe Bloggs',
            'completed': False,
            'highlight': False,
            'link_url': Location('repeating-group', 0, 'repeating-block-1').url(metadata)
        },
        {
            'repeating': True,
            'link_name': 'Jane Doe',
            'completed': False,
            'highlight': False,
            'link_url': Location('repeating-group', 1, 'repeating-block-1').url(metadata)
        },
        {
            'link_name': 'Extra Cover',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata)
        },
        {
            'link_name': 'Payment Details',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata)
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation('property-details', 0), user_navigation)
def test_navigation_skip_condition_hide_group(self):
    """Answering 'Buildings' triggers the skip condition, so the
    'Property Interstitial' group must not appear in the navigation."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    store = AnswerStore()
    store.add(Answer(
        value='Buildings',
        group_instance=0,
        answer_instance=0,
        answer_id='insurance-type-answer'
    ))
    navigation = _create_navigation(schema, store, metadata, [], [])
    names = [entry['link_name'] for entry in navigation.build_navigation('property-details', 0)]
    self.assertNotIn('Property Interstitial', names)
def test_navigation_skip_condition_show_group(self):
    """Answering 'Contents' does not trigger the skip condition, so the
    'Property Interstitial' group must appear in the navigation."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    store = AnswerStore()
    store.add(Answer(
        value='Contents',
        group_instance=0,
        answer_instance=0,
        answer_id='insurance-type-answer'
    ))
    navigation = _create_navigation(schema, store, metadata, [], [])
    names = [entry['link_name'] for entry in navigation.build_navigation('property-details', 0)]
    self.assertIn('Property Interstitial', names)
def test_navigation_skip_condition_change_answer(self):
    """Changing the skip-condition answer from 'Contents' to 'Buildings'
    removes the 'Property Interstitial' link on the next rebuild."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    completed_blocks = []
    answer_store = AnswerStore()
    answer_1 = Answer(
        value='Contents',
        group_instance=0,
        group_instance_id='group-0',
        answer_instance=0,
        answer_id='insurance-type-answer'
    )
    answer_store.add(answer_1)
    navigation = _create_navigation(schema, answer_store, metadata, completed_blocks, [])
    user_navigation = navigation.build_navigation('property-details', 0)
    link_names = [d['link_name'] for d in user_navigation]
    self.assertIn('Property Interstitial', link_names)
    change_answer = Answer(
        value='Buildings',
        group_instance=0,
        group_instance_id='group-0',
        answer_instance=0,
        answer_id='insurance-type-answer'
    )
    answer_store.update(change_answer)
    # Rebuild after the update: the interstitial group is now skipped.
    user_navigation = navigation.build_navigation('property-details', 0)
    link_names = [d['link_name'] for d in user_navigation]
    self.assertNotIn('Property Interstitial', link_names)
def test_build_navigation_returns_none_when_schema_navigation_is_false(self):
    """The navigation menu is suppressed when the schema hides it."""
    # Given a schema whose navigation is explicitly not visible
    schema = load_schema_from_params('test', 'navigation')
    schema.json['navigation'] = {'visible': False}
    navigation = _create_navigation(schema, AnswerStore(), {}, [], [])
    # When building the menu
    nav_menu = navigation.build_navigation('group-1', 'group-instance-1')
    # Then nothing is returned
    self.assertIsNone(nav_menu)
def test_build_navigation_returns_none_when_no_schema_navigation_property(self):
    """The navigation menu is suppressed when the schema omits it."""
    # Given a schema with no navigation property at all
    schema = load_schema_from_params('test', 'navigation')
    del schema.json['navigation']
    navigation = _create_navigation(schema, AnswerStore(), {}, [], [])
    # When building the menu
    nav_menu = navigation.build_navigation('group-1', 'group-instance-1')
    # Then nothing is returned
    self.assertIsNone(nav_menu)
def test_build_navigation_returns_navigation_when_schema_navigation_is_true(self):
    """A schema with navigation marked visible yields a non-None menu."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    schema.json['navigation'] = {'visible': True}
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    navigation = _create_navigation(schema, AnswerStore(), metadata, [], [])
    self.assertIsNotNone(navigation.build_navigation('group-1', 'group-instance-1'))
def test_build_navigation_summary_link_hidden_when_no_sections_completed(self):
    """The Summary link must not appear while nothing is completed."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    navigation = _create_navigation(schema, AnswerStore(), metadata, [], [])
    summary_link = {
        'link_name': 'Summary',
        'highlight': False,
        'repeating': False,
        'completed': False,
        'link_url': Location('summary-group', 0, 'summary').url(metadata)
    }
    self.assertNotIn(summary_link, navigation.build_navigation('property-details', 0))
def test_build_navigation_summary_link_hidden_when_not_all_sections_completed(self):
    """With only part of the survey completed, the Summary link is absent
    and only the five ordinary section links are shown."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    completed_blocks = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-details', 0, 'property-interstitial'),
        Location('house-details', 0, 'house-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed_blocks, standard_routing_path)
    confirmation_link = {
        'link_name': 'Summary',
        'highlight': False,
        'repeating': False,
        'completed': False,
        'link_url': Location('summary-group', 0, 'summary').url(metadata)
    }
    navigation_links = navigation.build_navigation('property-details', 0)
    self.assertNotIn(confirmation_link, navigation_links)
    self.assertEqual(len(navigation_links), 5)
def test_build_navigation_summary_link_visible_when_all_sections_complete(self):
    """Once every block on the routing path is complete, the Summary link appears."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    # Every non-summary block on the routing path below is marked complete.
    completed_blocks = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-details', 0, 'property-interstitial'),
        Location('house-details', 0, 'house-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover', 0, 'extra-cover-interstitial'),
        Location('payment-details', 0, 'credit-card'),
        Location('payment-details', 0, 'expiry-date'),
        Location('payment-details', 0, 'security-code'),
        Location('payment-details', 0, 'security-code-interstitial'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('extra-cover-items-group', 0, 'extra-cover-items-radio'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    # The routing path ends on the summary block itself.
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-details', 0, 'property-interstitial'),
        Location('house-details', 0, 'house-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('payment-details', 0, 'credit-card'),
        Location('payment-details', 0, 'expiry-date'),
        Location('payment-details', 0, 'security-code'),
        Location('payment-details', 0, 'security-code-interstitial'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover', 0, 'extra-cover-interstitial'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('extra-cover-items-group', 0, 'extra-cover-items-radio'),
        Location('skip-payment-group', 0, 'skip-payment'),
        Location('summary-group', 0, 'summary'),
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed_blocks, routing_path)
    confirmation_link = {
        'link_name': 'Summary',
        'highlight': False,
        'repeating': False,
        'completed': False,
        'link_url': Location('summary-group', 0, 'summary').url(metadata)
    }
    navigation_links = navigation.build_navigation('property-details', 0)
    self.assertIn(confirmation_link, navigation_links)
    self.assertEqual(len(navigation_links), 6)
def test_build_navigation_submit_answers_link_not_visible_for_survey_with_summary(self):
    """A survey that ends in a summary never shows a 'Submit answers' link."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {'eq_id': '1', 'collection_exercise_sid': '999', 'form_type': 'some_form'}
    completed = [
        Location(group_id, 0, block_id)
        for group_id, block_id in [
            ('property-details', 'insurance-type'),
            ('property-details', 'insurance-address'),
            ('property-details', 'property-interstitial'),
            ('house-details', 'house-type'),
            ('multiple-questions-group', 'household-composition'),
            ('repeating-group', 'repeating-block-1'),
            ('repeating-group', 'repeating-block-2'),
            ('extra-cover', 'extra-cover-block'),
            ('extra-cover', 'extra-cover-interstitial'),
            ('payment-details', 'credit-card'),
            ('payment-details', 'expiry-date'),
            ('payment-details', 'security-code'),
            ('payment-details', 'security-code-interstitial'),
            ('extra-cover-items-group', 'extra-cover-items'),
        ]
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed, standard_routing_path)
    submit_link = {
        'completed': False,
        'highlight': False,
        'link_name': 'Submit answers',
        'link_url': Location('confirmation-group', 0, 'confirmation').url(metadata),
        'repeating': False,
    }
    links = navigation.build_navigation('property-details', 0)
    self.assertNotIn(submit_link, links)
    self.assertEqual(len(links), 5)
def test_build_navigation_submit_answers_link_hidden_when_no_sections_completed(self):
    """'Submit answers' must be hidden while nothing has been completed."""
    schema = load_schema_from_params('test', 'navigation_confirmation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {'eq_id': '1', 'collection_exercise_sid': '999', 'form_type': 'some_form'}
    navigation = _create_navigation(schema, AnswerStore(), metadata, [], [])
    submit_link = {
        'completed': False,
        'highlight': False,
        'link_name': 'Submit answers',
        'link_url': Location('confirmation-group', 0, 'confirmation').url(metadata),
        'repeating': False,
    }
    links = navigation.build_navigation('property-details', 0)
    self.assertNotIn(submit_link, links)
    self.assertEqual(len(links), 4)
def test_build_navigation_submit_answers_link_hidden_when_not_all_sections_completed(self):
    """'Submit answers' must stay hidden while sections remain incomplete."""
    schema = load_schema_from_params('test', 'navigation_confirmation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {'eq_id': '1', 'collection_exercise_sid': '999', 'form_type': 'some_form'}
    completed = [
        Location(group_id, 0, block_id)
        for group_id, block_id in [
            ('property-details', 'insurance-type'),
            ('property-details', 'insurance-address'),
            ('property-details', 'property-interstitial'),
            ('house-details', 'house-type'),
            ('multiple-questions-group', 'household-composition'),
        ]
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed, [])
    submit_link = {
        'completed': False,
        'highlight': False,
        'link_name': 'Submit answers',
        'link_url': Location('confirmation-group', 0, 'confirmation').url(metadata),
        'repeating': False,
    }
    links = navigation.build_navigation('property-details', 0)
    self.assertNotIn(submit_link, links)
    self.assertEqual(len(links), 4)
def test_build_navigation_submit_answers_link_visible_when_all_sections_complete(self):
    """Once every block on the routing path is complete, 'Submit answers' appears."""
    schema = load_schema_from_params('test', 'navigation_confirmation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    # Every non-confirmation block on the routing path below is complete.
    completed_blocks = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-details', 0, 'property-interstitial'),
        Location('house-details', 0, 'house-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover', 0, 'extra-cover-interstitial'),
        Location('payment-details', 0, 'credit-card'),
        Location('payment-details', 0, 'expiry-date'),
        Location('payment-details', 0, 'security-code'),
        Location('payment-details', 0, 'security-code-interstitial'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
    ]
    # The routing path ends on the confirmation block itself.
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-details', 0, 'property-interstitial'),
        Location('house-details', 0, 'house-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover', 0, 'extra-cover-interstitial'),
        Location('payment-details', 0, 'credit-card'),
        Location('payment-details', 0, 'expiry-date'),
        Location('payment-details', 0, 'security-code'),
        Location('payment-details', 0, 'security-code-interstitial'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('confirmation-group', 0, 'confirmation'),
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed_blocks, routing_path)
    confirmation_link = {
        'link_name': 'Submit answers',
        'highlight': False,
        'repeating': False,
        'completed': False,
        'link_url': Location('confirmation-group', 0, 'confirmation').url(metadata)
    }
    navigation_links = navigation.build_navigation('property-details', 0)
    self.assertIn(confirmation_link, navigation_links)
    self.assertEqual(len(navigation_links), 5)
def test_build_navigation_summary_link_not_visible_for_survey_with_confirmation(self):
    """A survey that ends in a confirmation never shows a Summary link."""
    schema = load_schema_from_params('test', 'navigation_confirmation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {'eq_id': '1', 'collection_exercise_sid': '999', 'form_type': 'some_form'}
    completed = [
        Location(group_id, 0, block_id)
        for group_id, block_id in [
            ('property-details', 'insurance-type'),
            ('property-details', 'insurance-address'),
            ('property-details', 'property-interstitial'),
            ('house-details', 'house-type'),
            ('multiple-questions-group', 'household-composition'),
            ('repeating-group', 'repeating-block-1'),
            ('repeating-group', 'repeating-block-2'),
            ('extra-cover', 'extra-cover-block'),
            ('extra-cover', 'extra-cover-interstitial'),
            ('payment-details', 'credit-card'),
            ('payment-details', 'expiry-date'),
            ('payment-details', 'security-code'),
            ('payment-details', 'security-code-interstitial'),
            ('extra-cover-items-group', 'extra-cover-items'),
        ]
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed, [])
    summary_link = {
        'completed': False,
        'highlight': False,
        'link_name': 'Summary',
        'link_url': Location('summary-group', 0, 'summary').url(metadata),
        'repeating': False,
    }
    self.assertNotIn(summary_link, navigation.build_navigation('property-details', 0))
def test_build_navigation_submit_answers_link_not_visible_when_no_completed_blocks(self):
    """With no completed blocks at all, the Summary link must not be offered."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {'eq_id': '1', 'collection_exercise_sid': '999', 'form_type': 'some_form'}
    navigation = _create_navigation(schema, AnswerStore(), metadata, [], standard_routing_path)
    summary_link = {
        'completed': False,
        'highlight': False,
        'link_name': 'Summary',
        'link_url': Location('summary-group', 0, 'summary').url(metadata),
        'repeating': False,
    }
    links = navigation.build_navigation('property-details', 0)
    self.assertNotIn(summary_link, links)
    self.assertEqual(len(links), 5)
def test_build_navigation_summary_link_hidden_when_not_on_routing_path(self):
    """The Summary link must stay hidden when the summary is not on the routing path."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    completed_blocks = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-details', 0, 'property-interstitial'),
        Location('house-details', 0, 'house-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover', 0, 'extra-cover-interstitial'),
        Location('payment-details', 0, 'credit-card'),
        Location('payment-details', 0, 'expiry-date'),
        Location('payment-details', 0, 'security-code'),
        Location('payment-details', 0, 'security-code-interstitial'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
    ]
    # Note: this routing path deliberately ends at skip-payment and never
    # reaches the summary-group, unlike the all-sections-complete test.
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-details', 0, 'property-interstitial'),
        Location('house-details', 0, 'house-type'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('payment-details', 0, 'credit-card'),
        Location('payment-details', 0, 'expiry-date'),
        Location('payment-details', 0, 'security-code'),
        Location('payment-details', 0, 'security-code-interstitial'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover', 0, 'extra-cover-interstitial'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed_blocks, routing_path)
    confirmation_link = {
        'link_name': 'Summary',
        'highlight': False,
        'repeating': False,
        'completed': False,
        'link_url': Location('summary-group', 0, 'summary').url(metadata)
    }
    navigation_links = navigation.build_navigation('property-details', 0)
    self.assertNotIn(confirmation_link, navigation_links)
    self.assertEqual(len(navigation_links), 5)
def test_build_navigation_summary_link_shown_when_invalid_section_present(self):
    """An interstitial-only section at the front must not suppress the Summary link."""
    schema = load_schema_from_params('test', 'navigation')
    metadata = {'eq_id': '1', 'collection_exercise_sid': '999', 'form_type': 'some_form'}
    # Prepend a section whose only block is an interstitial, then re-parse the schema.
    schema.json['sections'].insert(0, {
        'id': 'intro-section',
        'title': 'Intro',
        'groups': [{
            'id': 'intro-group',
            'blocks': [{
                'id': 'intro-block',
                'type': 'Interstitial'
            }]
        }]
    })
    schema._parse_schema()  # pylint: disable=protected-access
    # The same set of locations serves as both completed blocks and routing path.
    path = [
        Location(group_id, 0, block_id)
        for group_id, block_id in [
            ('property-details', 'insurance-type'),
            ('multiple-questions-group', 'household-composition'),
            ('repeating-group', 'repeating-block-1'),
            ('repeating-group', 'repeating-block-2'),
            ('skip-payment-group', 'skip-payment'),
        ]
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, list(path), path)
    summary_link = {
        'completed': False,
        'highlight': False,
        'link_name': 'Summary',
        'link_url': Location('summary-group', 0, 'summary').url(metadata),
        'repeating': False,
    }
    links = navigation.build_navigation('skip-payment', 0)
    self.assertIn(summary_link, links)
    self.assertEqual(len(links), 7)
def test_build_navigation_repeated_blocks_independent_completeness(self):
    """Each repeating-group instance tracks its completion independently.

    Person1 has all of its repeating answers while Person2 only has a name,
    so the expected navigation marks Person1 completed and Person2 not.
    """
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    completed_blocks = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('extra-cover', 0, 'extra-cover-block')
    ]
    answer_store = AnswerStore()
    # Distinct group-instance ids distinguish the two household members.
    person1_uuid = uuid.uuid4()
    person2_uuid = uuid.uuid4()
    answer_store.add(Answer(
        answer_instance=0,
        answer_id='first-name',
        group_instance=0,
        value='Person1',
        group_instance_id=person1_uuid
    ))
    answer_store.add(Answer(
        answer_instance=1,
        answer_id='first-name',
        group_instance=0,
        value='Person2',
        group_instance_id=person2_uuid
    ))
    # Only Person1 gets the follow-up answers below.
    answer_store.add(Answer(
        answer_instance=0,
        answer_id='what-is-your-age',
        group_instance=0,
        value=42,
        group_instance_id=person1_uuid
    ))
    answer_store.add(Answer(
        answer_instance=0,
        answer_id='what-is-your-shoe-size',
        group_instance=0,
        value='Employed',
        group_instance_id=person1_uuid
    ))
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('repeating-group', 0, 'repeating-block-1'),
        Location('repeating-group', 0, 'repeating-block-2'),
        Location('repeating-group', 1, 'repeating-block-1'),
        Location('repeating-group', 1, 'repeating-block-2'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    navigation = _create_navigation(schema, answer_store, metadata, completed_blocks, routing_path)
    user_navigation = [
        {
            'completed': True,
            'highlight': True,
            'repeating': False,
            'link_name': 'Property Details',
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'link_name': 'Household Composition',
            'highlight': False,
            'repeating': False,
            'completed': True,
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        {
            'completed': True,
            'highlight': False,
            'repeating': True,
            'link_name': 'Person1',
            'link_url': Location('repeating-group', 0, 'repeating-block-1').url(metadata),
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': True,
            'link_name': 'Person2',
            'link_url': Location('repeating-group', 1, 'repeating-block-1').url(metadata),
        },
        {
            'completed': True,
            'highlight': False,
            'repeating': False,
            'link_name': 'Extra Cover',
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata),
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': False,
            'link_name': 'Payment Details',
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata),
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation('property-details', 0), user_navigation)
def test_build_navigation_first_group_with_skip_condition_containing_repeating_group(self):
    """A skipped first group in a section must not break that section's nav entry."""
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    # Add an always-skipped intro group (extra-cover-answer is never set here)
    # at the front of the extra-cover-items section.
    schema.json['sections'][6]['groups'].insert(0, {
        'id': 'extra-cover-items-intro',
        'skip_conditions': [{
            'when': [{
                'id': 'extra-cover-answer',
                'condition': 'not set'
            }]
        }],
        'blocks': [{
            'id': 'household-full-names-intro-block',
            'type': 'Interstitial'
        }]
    })
    completed_blocks = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-interstitial-section', 0, 'property-interstitial'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
    ]
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('property-interstitial-section', 0, 'property-interstitial'),
        Location('multiple-questions-group', 0, 'household-composition'),
        Location('extra-cover', 0, 'extra-cover-block'),
        Location('extra-cover-items-group', 0, 'extra-cover-items'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed_blocks, routing_path)
    user_navigation = [
        {
            'completed': True,
            'highlight': True,
            'repeating': False,
            'link_name': 'Property Details',
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'link_name': 'Household Composition',
            'highlight': False,
            'repeating': False,
            'completed': True,
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        # deliberately omitting extra cover section
        {
            'completed': True,
            'highlight': False,
            'repeating': False,
            'link_name': 'Extra Cover',
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata),
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': False,
            'link_name': 'Payment Details',
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata),
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation(
        'property-details', 0), user_navigation)
def test_build_navigation_with_single_skipped_block_in_group(self):
    """A section containing a group which doesn't have all of its blocks skipped should
    have its navigation rendered.

    Only the insurance-address block is skipped (its skip condition fires on
    the 'Both' answer), so the Property Details entry must still appear.
    """
    schema = load_schema_from_params('test', 'navigation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    metadata = {
        'eq_id': '1',
        'collection_exercise_sid': '999',
        'form_type': 'some_form'
    }
    # skip the insurance-address block if insurance-type-answer is Both
    schema.json['sections'][0]['groups'][0]['blocks'][1]['skip_conditions'] = [{
        'when': [{
            'id': 'insurance-type-answer',
            'condition': 'equals',
            'value': 'Both'
        }]
    }]
    completed_blocks = [
        Location('property-details', 0, 'insurance-type'),
    ]
    answer_store = AnswerStore()
    # 'Both' triggers the skip condition installed above.
    answer_store.add(Answer(
        answer_instance=0,
        answer_id='insurance-type-answer',
        group_instance=0,
        value='Both'
    ))
    routing_path = [
        Location('property-details', 0, 'insurance-type'),
        Location('property-details', 0, 'insurance-address'),
        Location('skip-payment-group', 0, 'skip-payment'),
    ]
    navigation = _create_navigation(schema, answer_store, metadata, completed_blocks, routing_path)
    user_navigation = [
        {
            'completed': True,
            'highlight': True,
            'repeating': False,
            'link_name': 'Property Details',
            'link_url': Location('property-details', 0, 'insurance-type').url(metadata)
        },
        {
            'link_name': 'House Details',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('house-details', 0, 'house-type').url(metadata)
        },
        {
            'link_name': 'Household Composition',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('multiple-questions-group', 0, 'household-composition').url(metadata)
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': False,
            'link_name': 'Extra Cover',
            'link_url': Location('extra-cover', 0, 'extra-cover-block').url(metadata),
        },
        {
            'completed': False,
            'highlight': False,
            'repeating': False,
            'link_name': 'Payment Details',
            'link_url': Location('skip-payment-group', 0, 'skip-payment').url(metadata),
        },
        {
            'link_name': 'Final section',
            'highlight': False,
            'repeating': False,
            'completed': False,
            'link_url': Location('final-section-routed-group', 0, 'final-interstitial').url(metadata)
        }
    ]
    self.assertEqual(navigation.build_navigation('property-details', 0), user_navigation)
def test_build_navigation_completed_section_with_summary_links_to_last_block(self):
    """A finished section that ends in a SectionSummary must link to that summary block."""
    schema = load_schema_from_params('test', 'navigation_confirmation')
    schema.answer_is_in_repeating_group = MagicMock(return_value=True)
    # Append a SectionSummary block to the first section's first group.
    schema.json['sections'][0]['groups'][0]['blocks'].append({
        'id': 'property-summary',
        'type': 'SectionSummary'
    })
    metadata = {'eq_id': '1', 'collection_exercise_sid': '999', 'form_type': 'some_form'}
    completed = [
        Location('property-details', 0, block_id)
        for block_id in ('insurance-type', 'insurance-address', 'property-interstitial')
    ]
    path = completed + [Location('property-details', 0, 'property-summary')]
    navigation = _create_navigation(schema, AnswerStore(), metadata, completed, path)
    expected_link = {
        'completed': True,
        'highlight': True,
        'link_name': 'Property Details',
        'link_url': Location('property-details', 0, 'property-summary').url(metadata),
        'repeating': False,
    }
    self.assertIn(expected_link, navigation.build_navigation('property-details', 0))
def _create_navigation(schema, answer_store, metadata, completed_blocks, routing_path):
    """Build a Navigation wired to a Completeness over the same survey state."""
    progress = Completeness(
        schema, answer_store, completed_blocks, routing_path, metadata)
    return Navigation(
        schema, answer_store, metadata, completed_blocks, routing_path, progress)
| 38.978522
| 113
| 0.55631
| 5,760
| 61,703
| 5.749653
| 0.034375
| 0.041367
| 0.04493
| 0.050003
| 0.937496
| 0.928891
| 0.91947
| 0.911981
| 0.900387
| 0.880971
| 0
| 0.015092
| 0.313793
| 61,703
| 1,582
| 114
| 39.003161
| 0.767082
| 0.005932
| 0
| 0.756044
| 0
| 0
| 0.285591
| 0.069428
| 0
| 0
| 0
| 0
| 0.027106
| 1
| 0.020513
| false
| 0
| 0.005861
| 0
| 0.027839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9909658565d4d55d87a6f691f953434c8af8ebfb
| 28,715
|
py
|
Python
|
sdk/python/pulumi_aws/ec2/vpc_peering_connection_accepter.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/vpc_peering_connection_accepter.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/vpc_peering_connection_accepter.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VpcPeeringConnectionAccepterArgs', 'VpcPeeringConnectionAccepter']
@pulumi.input_type
class VpcPeeringConnectionAccepterArgs:
    """Input arguments for a VpcPeeringConnectionAccepter resource.

    NOTE(review): generated by the Pulumi Terraform Bridge (per the file
    header) — structural changes belong in the generator, not here.
    """

    def __init__(__self__, *,
                 vpc_peering_connection_id: pulumi.Input[str],
                 accepter: Optional[pulumi.Input['VpcPeeringConnectionAccepterAccepterArgs']] = None,
                 auto_accept: Optional[pulumi.Input[bool]] = None,
                 requester: Optional[pulumi.Input['VpcPeeringConnectionAccepterRequesterArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a VpcPeeringConnectionAccepter resource.
        :param pulumi.Input[str] vpc_peering_connection_id: The VPC Peering Connection ID to manage.
        :param pulumi.Input['VpcPeeringConnectionAccepterAccepterArgs'] accepter: A configuration block that describes [VPC Peering Connection]
               (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC.
        :param pulumi.Input[bool] auto_accept: Whether or not to accept the peering request. Defaults to `false`.
        :param pulumi.Input['VpcPeeringConnectionAccepterRequesterArgs'] requester: A configuration block that describes [VPC Peering Connection]
               (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        """
        pulumi.set(__self__, "vpc_peering_connection_id", vpc_peering_connection_id)
        # Optional arguments are only recorded when explicitly supplied.
        if accepter is not None:
            pulumi.set(__self__, "accepter", accepter)
        if auto_accept is not None:
            pulumi.set(__self__, "auto_accept", auto_accept)
        if requester is not None:
            pulumi.set(__self__, "requester", requester)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    # Getter/setter pairs below delegate to pulumi.get/pulumi.set; the
    # getter name= argument carries the camelCase wire property name.

    @property
    @pulumi.getter(name="vpcPeeringConnectionId")
    def vpc_peering_connection_id(self) -> pulumi.Input[str]:
        """
        The VPC Peering Connection ID to manage.
        """
        return pulumi.get(self, "vpc_peering_connection_id")

    @vpc_peering_connection_id.setter
    def vpc_peering_connection_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vpc_peering_connection_id", value)

    @property
    @pulumi.getter
    def accepter(self) -> Optional[pulumi.Input['VpcPeeringConnectionAccepterAccepterArgs']]:
        """
        A configuration block that describes [VPC Peering Connection]
        (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC.
        """
        return pulumi.get(self, "accepter")

    @accepter.setter
    def accepter(self, value: Optional[pulumi.Input['VpcPeeringConnectionAccepterAccepterArgs']]):
        pulumi.set(self, "accepter", value)

    @property
    @pulumi.getter(name="autoAccept")
    def auto_accept(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not to accept the peering request. Defaults to `false`.
        """
        return pulumi.get(self, "auto_accept")

    @auto_accept.setter
    def auto_accept(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_accept", value)

    @property
    @pulumi.getter
    def requester(self) -> Optional[pulumi.Input['VpcPeeringConnectionAccepterRequesterArgs']]:
        """
        A configuration block that describes [VPC Peering Connection]
        (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC.
        """
        return pulumi.get(self, "requester")

    @requester.setter
    def requester(self, value: Optional[pulumi.Input['VpcPeeringConnectionAccepterRequesterArgs']]):
        pulumi.set(self, "requester", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _VpcPeeringConnectionAccepterState:
def __init__(__self__, *,
accept_status: Optional[pulumi.Input[str]] = None,
accepter: Optional[pulumi.Input['VpcPeeringConnectionAccepterAccepterArgs']] = None,
auto_accept: Optional[pulumi.Input[bool]] = None,
peer_owner_id: Optional[pulumi.Input[str]] = None,
peer_region: Optional[pulumi.Input[str]] = None,
peer_vpc_id: Optional[pulumi.Input[str]] = None,
requester: Optional[pulumi.Input['VpcPeeringConnectionAccepterRequesterArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
vpc_peering_connection_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VpcPeeringConnectionAccepter resources.
:param pulumi.Input[str] accept_status: The status of the VPC Peering Connection request.
:param pulumi.Input['VpcPeeringConnectionAccepterAccepterArgs'] accepter: A configuration block that describes [VPC Peering Connection]
(https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC.
:param pulumi.Input[bool] auto_accept: Whether or not to accept the peering request. Defaults to `false`.
:param pulumi.Input[str] peer_owner_id: The AWS account ID of the owner of the requester VPC.
:param pulumi.Input[str] peer_region: The region of the accepter VPC.
:param pulumi.Input[str] peer_vpc_id: The ID of the requester VPC.
:param pulumi.Input['VpcPeeringConnectionAccepterRequesterArgs'] requester: A configuration block that describes [VPC Peering Connection]
(https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] vpc_id: The ID of the accepter VPC.
:param pulumi.Input[str] vpc_peering_connection_id: The VPC Peering Connection ID to manage.
"""
if accept_status is not None:
pulumi.set(__self__, "accept_status", accept_status)
if accepter is not None:
pulumi.set(__self__, "accepter", accepter)
if auto_accept is not None:
pulumi.set(__self__, "auto_accept", auto_accept)
if peer_owner_id is not None:
pulumi.set(__self__, "peer_owner_id", peer_owner_id)
if peer_region is not None:
pulumi.set(__self__, "peer_region", peer_region)
if peer_vpc_id is not None:
pulumi.set(__self__, "peer_vpc_id", peer_vpc_id)
if requester is not None:
pulumi.set(__self__, "requester", requester)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpc_id is not None:
pulumi.set(__self__, "vpc_id", vpc_id)
if vpc_peering_connection_id is not None:
pulumi.set(__self__, "vpc_peering_connection_id", vpc_peering_connection_id)
    @property
    @pulumi.getter(name="acceptStatus")
    def accept_status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the VPC Peering Connection request.
        """
        return pulumi.get(self, "accept_status")

    @accept_status.setter
    def accept_status(self, value: Optional[pulumi.Input[str]]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "accept_status", value)
    @property
    @pulumi.getter
    def accepter(self) -> Optional[pulumi.Input['VpcPeeringConnectionAccepterAccepterArgs']]:
        """
        A configuration block that describes [VPC Peering Connection]
        (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC.
        """
        return pulumi.get(self, "accepter")

    @accepter.setter
    def accepter(self, value: Optional[pulumi.Input['VpcPeeringConnectionAccepterAccepterArgs']]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "accepter", value)
    @property
    @pulumi.getter(name="autoAccept")
    def auto_accept(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not to accept the peering request. Defaults to `false`.
        """
        return pulumi.get(self, "auto_accept")

    @auto_accept.setter
    def auto_accept(self, value: Optional[pulumi.Input[bool]]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "auto_accept", value)
    @property
    @pulumi.getter(name="peerOwnerId")
    def peer_owner_id(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS account ID of the owner of the requester VPC.
        """
        return pulumi.get(self, "peer_owner_id")

    @peer_owner_id.setter
    def peer_owner_id(self, value: Optional[pulumi.Input[str]]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "peer_owner_id", value)
    @property
    @pulumi.getter(name="peerRegion")
    def peer_region(self) -> Optional[pulumi.Input[str]]:
        """
        The region of the accepter VPC.
        """
        return pulumi.get(self, "peer_region")

    @peer_region.setter
    def peer_region(self, value: Optional[pulumi.Input[str]]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "peer_region", value)
    @property
    @pulumi.getter(name="peerVpcId")
    def peer_vpc_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the requester VPC.
        """
        return pulumi.get(self, "peer_vpc_id")

    @peer_vpc_id.setter
    def peer_vpc_id(self, value: Optional[pulumi.Input[str]]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "peer_vpc_id", value)
    @property
    @pulumi.getter
    def requester(self) -> Optional[pulumi.Input['VpcPeeringConnectionAccepterRequesterArgs']]:
        """
        A configuration block that describes [VPC Peering Connection]
        (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC.
        """
        return pulumi.get(self, "requester")

    @requester.setter
    def requester(self, value: Optional[pulumi.Input['VpcPeeringConnectionAccepterRequesterArgs']]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "requester", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="vpcId")
    def vpc_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the accepter VPC.
        """
        return pulumi.get(self, "vpc_id")

    @vpc_id.setter
    def vpc_id(self, value: Optional[pulumi.Input[str]]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "vpc_id", value)
    @property
    @pulumi.getter(name="vpcPeeringConnectionId")
    def vpc_peering_connection_id(self) -> Optional[pulumi.Input[str]]:
        """
        The VPC Peering Connection ID to manage.
        """
        return pulumi.get(self, "vpc_peering_connection_id")

    @vpc_peering_connection_id.setter
    def vpc_peering_connection_id(self, value: Optional[pulumi.Input[str]]):
        # Write through to the Pulumi-managed property bag.
        pulumi.set(self, "vpc_peering_connection_id", value)
class VpcPeeringConnectionAccepter(pulumi.CustomResource):
    # NOTE: generated resource wrapper — the two @overload __init__ stubs only
    # describe the accepted call shapes; the real dispatch happens in the
    # undecorated __init__, which routes to _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accepter: Optional[pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterAccepterArgs']]] = None,
                 auto_accept: Optional[pulumi.Input[bool]] = None,
                 requester: Optional[pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterRequesterArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vpc_peering_connection_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a resource to manage the accepter's side of a VPC Peering Connection.

        When a cross-account (requester's AWS account differs from the accepter's AWS account) or an inter-region
        VPC Peering Connection is created, a VPC Peering Connection resource is automatically created in the
        accepter's account.
        The requester can use the `ec2.VpcPeeringConnection` resource to manage its side of the connection
        and the accepter can use the `ec2.VpcPeeringConnectionAccepter` resource to "adopt" its side of the
        connection into management.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws
        import pulumi_pulumi as pulumi

        peer = pulumi.providers.Aws("peer", region="us-west-2")
        # Accepter's credentials.
        main = aws.ec2.Vpc("main", cidr_block="10.0.0.0/16")
        peer_vpc = aws.ec2.Vpc("peerVpc", cidr_block="10.1.0.0/16",
        opts=pulumi.ResourceOptions(provider=aws["peer"]))
        peer_caller_identity = aws.get_caller_identity()
        # Requester's side of the connection.
        peer_vpc_peering_connection = aws.ec2.VpcPeeringConnection("peerVpcPeeringConnection",
            vpc_id=main.id,
            peer_vpc_id=peer_vpc.id,
            peer_owner_id=peer_caller_identity.account_id,
            peer_region="us-west-2",
            auto_accept=False,
            tags={
                "Side": "Requester",
            })
        # Accepter's side of the connection.
        peer_vpc_peering_connection_accepter = aws.ec2.VpcPeeringConnectionAccepter("peerVpcPeeringConnectionAccepter",
            vpc_peering_connection_id=peer_vpc_peering_connection.id,
            auto_accept=True,
            tags={
                "Side": "Accepter",
            },
            opts=pulumi.ResourceOptions(provider=aws["peer"]))
        ```

        ## Import

        VPC Peering Connection Accepters can be imported by using the Peering Connection ID, e.g.

        ```sh
         $ pulumi import aws:ec2/vpcPeeringConnectionAccepter:VpcPeeringConnectionAccepter example pcx-12345678
        ```

        Certain resource arguments, like `auto_accept`, do not have an EC2 API method for reading the information after peering connection creation. If the argument is set in the provider configuration on an imported resource, this provider will always show a difference. To workaround this behavior, either omit the argument from the configuration or use [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) to hide the difference, e.g. terraform resource "aws_vpc_peering_connection_accepter" "example" {
        # ... other configuration ...
        # There is no AWS EC2 API for reading auto_accept
        lifecycle {
        ignore_changes = [auto_accept]
        } }

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterAccepterArgs']] accepter: A configuration block that describes [VPC Peering Connection]
               (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC.
        :param pulumi.Input[bool] auto_accept: Whether or not to accept the peering request. Defaults to `false`.
        :param pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterRequesterArgs']] requester: A configuration block that describes [VPC Peering Connection]
               (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input[str] vpc_peering_connection_id: The VPC Peering Connection ID to manage.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: VpcPeeringConnectionAccepterArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a resource to manage the accepter's side of a VPC Peering Connection.

        When a cross-account (requester's AWS account differs from the accepter's AWS account) or an inter-region
        VPC Peering Connection is created, a VPC Peering Connection resource is automatically created in the
        accepter's account.
        The requester can use the `ec2.VpcPeeringConnection` resource to manage its side of the connection
        and the accepter can use the `ec2.VpcPeeringConnectionAccepter` resource to "adopt" its side of the
        connection into management.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws
        import pulumi_pulumi as pulumi

        peer = pulumi.providers.Aws("peer", region="us-west-2")
        # Accepter's credentials.
        main = aws.ec2.Vpc("main", cidr_block="10.0.0.0/16")
        peer_vpc = aws.ec2.Vpc("peerVpc", cidr_block="10.1.0.0/16",
        opts=pulumi.ResourceOptions(provider=aws["peer"]))
        peer_caller_identity = aws.get_caller_identity()
        # Requester's side of the connection.
        peer_vpc_peering_connection = aws.ec2.VpcPeeringConnection("peerVpcPeeringConnection",
            vpc_id=main.id,
            peer_vpc_id=peer_vpc.id,
            peer_owner_id=peer_caller_identity.account_id,
            peer_region="us-west-2",
            auto_accept=False,
            tags={
                "Side": "Requester",
            })
        # Accepter's side of the connection.
        peer_vpc_peering_connection_accepter = aws.ec2.VpcPeeringConnectionAccepter("peerVpcPeeringConnectionAccepter",
            vpc_peering_connection_id=peer_vpc_peering_connection.id,
            auto_accept=True,
            tags={
                "Side": "Accepter",
            },
            opts=pulumi.ResourceOptions(provider=aws["peer"]))
        ```

        ## Import

        VPC Peering Connection Accepters can be imported by using the Peering Connection ID, e.g.

        ```sh
         $ pulumi import aws:ec2/vpcPeeringConnectionAccepter:VpcPeeringConnectionAccepter example pcx-12345678
        ```

        Certain resource arguments, like `auto_accept`, do not have an EC2 API method for reading the information after peering connection creation. If the argument is set in the provider configuration on an imported resource, this provider will always show a difference. To workaround this behavior, either omit the argument from the configuration or use [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) to hide the difference, e.g. terraform resource "aws_vpc_peering_connection_accepter" "example" {
        # ... other configuration ...
        # There is no AWS EC2 API for reading auto_accept
        lifecycle {
        ignore_changes = [auto_accept]
        } }

        :param str resource_name: The name of the resource.
        :param VpcPeeringConnectionAccepterArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a bundled Args
        # object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(VpcPeeringConnectionAccepterArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accepter: Optional[pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterAccepterArgs']]] = None,
                 auto_accept: Optional[pulumi.Input[bool]] = None,
                 requester: Optional[pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterRequesterArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vpc_peering_connection_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Normalize resource options and fill in the provider SDK version.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is only set when rehydrating an existing resource via get();
        # for a fresh resource we build the property bag from the arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = VpcPeeringConnectionAccepterArgs.__new__(VpcPeeringConnectionAccepterArgs)

            __props__.__dict__["accepter"] = accepter
            __props__.__dict__["auto_accept"] = auto_accept
            __props__.__dict__["requester"] = requester
            __props__.__dict__["tags"] = tags
            # vpc_peering_connection_id is the only required input (unless the
            # resource is being looked up by URN).
            if vpc_peering_connection_id is None and not opts.urn:
                raise TypeError("Missing required property 'vpc_peering_connection_id'")
            __props__.__dict__["vpc_peering_connection_id"] = vpc_peering_connection_id
            # Output-only properties start as None and are resolved by the engine.
            __props__.__dict__["accept_status"] = None
            __props__.__dict__["peer_owner_id"] = None
            __props__.__dict__["peer_region"] = None
            __props__.__dict__["peer_vpc_id"] = None
            __props__.__dict__["vpc_id"] = None
        super(VpcPeeringConnectionAccepter, __self__).__init__(
            'aws:ec2/vpcPeeringConnectionAccepter:VpcPeeringConnectionAccepter',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            accept_status: Optional[pulumi.Input[str]] = None,
            accepter: Optional[pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterAccepterArgs']]] = None,
            auto_accept: Optional[pulumi.Input[bool]] = None,
            peer_owner_id: Optional[pulumi.Input[str]] = None,
            peer_region: Optional[pulumi.Input[str]] = None,
            peer_vpc_id: Optional[pulumi.Input[str]] = None,
            requester: Optional[pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterRequesterArgs']]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            vpc_id: Optional[pulumi.Input[str]] = None,
            vpc_peering_connection_id: Optional[pulumi.Input[str]] = None) -> 'VpcPeeringConnectionAccepter':
        """
        Get an existing VpcPeeringConnectionAccepter resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accept_status: The status of the VPC Peering Connection request.
        :param pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterAccepterArgs']] accepter: A configuration block that describes [VPC Peering Connection]
               (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC.
        :param pulumi.Input[bool] auto_accept: Whether or not to accept the peering request. Defaults to `false`.
        :param pulumi.Input[str] peer_owner_id: The AWS account ID of the owner of the requester VPC.
        :param pulumi.Input[str] peer_region: The region of the accepter VPC.
        :param pulumi.Input[str] peer_vpc_id: The ID of the requester VPC.
        :param pulumi.Input[pulumi.InputType['VpcPeeringConnectionAccepterRequesterArgs']] requester: A configuration block that describes [VPC Peering Connection]
               (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input[str] vpc_id: The ID of the accepter VPC.
        :param pulumi.Input[str] vpc_peering_connection_id: The VPC Peering Connection ID to manage.
        """
        # Attach the provider ID so _internal_init takes the "existing
        # resource" path instead of creating a new one.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _VpcPeeringConnectionAccepterState.__new__(_VpcPeeringConnectionAccepterState)

        __props__.__dict__["accept_status"] = accept_status
        __props__.__dict__["accepter"] = accepter
        __props__.__dict__["auto_accept"] = auto_accept
        __props__.__dict__["peer_owner_id"] = peer_owner_id
        __props__.__dict__["peer_region"] = peer_region
        __props__.__dict__["peer_vpc_id"] = peer_vpc_id
        __props__.__dict__["requester"] = requester
        __props__.__dict__["tags"] = tags
        __props__.__dict__["vpc_id"] = vpc_id
        __props__.__dict__["vpc_peering_connection_id"] = vpc_peering_connection_id
        return VpcPeeringConnectionAccepter(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="acceptStatus")
    def accept_status(self) -> pulumi.Output[str]:
        """
        The status of the VPC Peering Connection request.
        """
        return pulumi.get(self, "accept_status")

    @property
    @pulumi.getter
    def accepter(self) -> pulumi.Output['outputs.VpcPeeringConnectionAccepterAccepter']:
        """
        A configuration block that describes [VPC Peering Connection]
        (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC.
        """
        return pulumi.get(self, "accepter")

    @property
    @pulumi.getter(name="autoAccept")
    def auto_accept(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether or not to accept the peering request. Defaults to `false`.
        """
        return pulumi.get(self, "auto_accept")

    @property
    @pulumi.getter(name="peerOwnerId")
    def peer_owner_id(self) -> pulumi.Output[str]:
        """
        The AWS account ID of the owner of the requester VPC.
        """
        return pulumi.get(self, "peer_owner_id")

    @property
    @pulumi.getter(name="peerRegion")
    def peer_region(self) -> pulumi.Output[str]:
        """
        The region of the accepter VPC.
        """
        return pulumi.get(self, "peer_region")

    @property
    @pulumi.getter(name="peerVpcId")
    def peer_vpc_id(self) -> pulumi.Output[str]:
        """
        The ID of the requester VPC.
        """
        return pulumi.get(self, "peer_vpc_id")

    @property
    @pulumi.getter
    def requester(self) -> pulumi.Output['outputs.VpcPeeringConnectionAccepterRequester']:
        """
        A configuration block that describes [VPC Peering Connection]
        (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC.
        """
        return pulumi.get(self, "requester")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="vpcId")
    def vpc_id(self) -> pulumi.Output[str]:
        """
        The ID of the accepter VPC.
        """
        return pulumi.get(self, "vpc_id")

    @property
    @pulumi.getter(name="vpcPeeringConnectionId")
    def vpc_peering_connection_id(self) -> pulumi.Output[str]:
        """
        The VPC Peering Connection ID to manage.
        """
        return pulumi.get(self, "vpc_peering_connection_id")
| 46.843393
| 548
| 0.667247
| 3,356
| 28,715
| 5.501788
| 0.072706
| 0.066724
| 0.081239
| 0.051235
| 0.869909
| 0.850845
| 0.831293
| 0.821328
| 0.808871
| 0.788778
| 0
| 0.003084
| 0.232039
| 28,715
| 612
| 549
| 46.919935
| 0.83421
| 0.415079
| 0
| 0.648276
| 1
| 0
| 0.14793
| 0.086359
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162069
| false
| 0.003448
| 0.024138
| 0
| 0.286207
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54d1f67854df8b103e4576cadafca8ecb4b3398a
| 19,567
|
py
|
Python
|
python/swagger_client/api/admins_api.py
|
alextselegidis/easyappointments-sdk
|
8ba969dc1221ea614b70d4d52313f20fc85df1e1
|
[
"CC-BY-3.0"
] | null | null | null |
python/swagger_client/api/admins_api.py
|
alextselegidis/easyappointments-sdk
|
8ba969dc1221ea614b70d4d52313f20fc85df1e1
|
[
"CC-BY-3.0"
] | null | null | null |
python/swagger_client/api/admins_api.py
|
alextselegidis/easyappointments-sdk
|
8ba969dc1221ea614b70d4d52313f20fc85df1e1
|
[
"CC-BY-3.0"
] | null | null | null |
# coding: utf-8
"""
Easy!Appointments API
These are the OpenAPI specs that describe the REST API of Easy!Appointments. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: info@easyappointments.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class AdminsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def admins_admin_id_delete(self, admin_id, **kwargs): # noqa: E501
"""Delete an admin # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admins_admin_id_delete(admin_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int admin_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admins_admin_id_delete_with_http_info(admin_id, **kwargs) # noqa: E501
else:
(data) = self.admins_admin_id_delete_with_http_info(admin_id, **kwargs) # noqa: E501
return data
    def admins_admin_id_delete_with_http_info(self, admin_id, **kwargs):  # noqa: E501
        """Delete an admin  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.admins_admin_id_delete_with_http_info(admin_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int admin_id: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: its path parameter plus
        # the generic request-control options forwarded to ApiClient.call_api.
        all_params = ['admin_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the local namespace, then merge **kwargs into it while
        # rejecting any keyword this endpoint does not understand.
        # NOTE: locals() makes this code sensitive to local variable names.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method admins_admin_id_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'admin_id' is set
        if ('admin_id' not in params or
                params['admin_id'] is None):
            raise ValueError("Missing the required parameter `admin_id` when calling `admins_admin_id_delete`")  # noqa: E501

        collection_formats = {}

        # Map the python-style name onto the {adminId} path placeholder.
        path_params = {}
        if 'admin_id' in params:
            path_params['adminId'] = params['admin_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BasicAuth', 'BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/admins/{adminId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def admins_admin_id_get(self, admin_id, **kwargs): # noqa: E501
"""Get an admin # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admins_admin_id_get(admin_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int admin_id: (required)
:return: AdminRecord
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admins_admin_id_get_with_http_info(admin_id, **kwargs) # noqa: E501
else:
(data) = self.admins_admin_id_get_with_http_info(admin_id, **kwargs) # noqa: E501
return data
    def admins_admin_id_get_with_http_info(self, admin_id, **kwargs):  # noqa: E501
        """Get an admin  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.admins_admin_id_get_with_http_info(admin_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int admin_id: (required)
        :return: AdminRecord
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: its path parameter plus
        # the generic request-control options forwarded to ApiClient.call_api.
        all_params = ['admin_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the local namespace, then merge **kwargs into it while
        # rejecting any keyword this endpoint does not understand.
        # NOTE: locals() makes this code sensitive to local variable names.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method admins_admin_id_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'admin_id' is set
        if ('admin_id' not in params or
                params['admin_id'] is None):
            raise ValueError("Missing the required parameter `admin_id` when calling `admins_admin_id_get`")  # noqa: E501

        collection_formats = {}

        # Map the python-style name onto the {adminId} path placeholder.
        path_params = {}
        if 'admin_id' in params:
            path_params['adminId'] = params['admin_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BasicAuth', 'BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/admins/{adminId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AdminRecord',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def admins_admin_id_put(self, body, admin_id, **kwargs): # noqa: E501
"""Update an admin # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admins_admin_id_put(body, admin_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AdminPayload body: (required)
:param int admin_id: (required)
:return: AdminRecord
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admins_admin_id_put_with_http_info(body, admin_id, **kwargs) # noqa: E501
else:
(data) = self.admins_admin_id_put_with_http_info(body, admin_id, **kwargs) # noqa: E501
return data
    def admins_admin_id_put_with_http_info(self, body, admin_id, **kwargs):  # noqa: E501
        """Update an admin  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.admins_admin_id_put_with_http_info(body, admin_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param AdminPayload body: (required)
        :param int admin_id: (required)
        :return: AdminRecord
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: its two required
        # parameters plus the generic request-control options forwarded to
        # ApiClient.call_api.
        all_params = ['body', 'admin_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the local namespace, then merge **kwargs into it while
        # rejecting any keyword this endpoint does not understand.
        # NOTE: locals() makes this code sensitive to local variable names.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method admins_admin_id_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `admins_admin_id_put`")  # noqa: E501
        # verify the required parameter 'admin_id' is set
        if ('admin_id' not in params or
                params['admin_id'] is None):
            raise ValueError("Missing the required parameter `admin_id` when calling `admins_admin_id_put`")  # noqa: E501

        collection_formats = {}

        # Map the python-style name onto the {adminId} path placeholder.
        path_params = {}
        if 'admin_id' in params:
            path_params['adminId'] = params['admin_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The payload travels as the JSON request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BasicAuth', 'BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/admins/{adminId}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AdminRecord',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def admins_get(self, **kwargs): # noqa: E501
"""Get all admins # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admins_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page:
:param int length:
:param str sort:
:param str q:
:param str fields:
:param str _with:
:return: AdminCollection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admins_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.admins_get_with_http_info(**kwargs) # noqa: E501
return data
    def admins_get_with_http_info(self, **kwargs):  # noqa: E501
        """Get all admins  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.admins_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int page:
        :param int length:
        :param str sort:
        :param str q:
        :param str fields:
        :param str _with:
        :return: AdminCollection
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts: optional listing filters
        # plus the generic request-control options forwarded to
        # ApiClient.call_api.
        all_params = ['page', 'length', 'sort', 'q', 'fields', '_with']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the local namespace, then merge **kwargs into it while
        # rejecting any keyword this endpoint does not understand.
        # NOTE: locals() makes this code sensitive to local variable names.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method admins_get" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        # Only filters that were actually supplied become query parameters;
        # note '_with' is sent on the wire as 'with' ('with' is a Python
        # keyword, hence the leading underscore in the parameter name).
        query_params = []
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'length' in params:
            query_params.append(('length', params['length']))  # noqa: E501
        if 'sort' in params:
            query_params.append(('sort', params['sort']))  # noqa: E501
        if 'q' in params:
            query_params.append(('q', params['q']))  # noqa: E501
        if 'fields' in params:
            query_params.append(('fields', params['fields']))  # noqa: E501
        if '_with' in params:
            query_params.append(('with', params['_with']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BasicAuth', 'BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/admins', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AdminCollection',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def admins_post(self, body, **kwargs): # noqa: E501
"""Create an admin # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admins_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AdminPayload body: (required)
:return: AdminRecord
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admins_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.admins_post_with_http_info(body, **kwargs) # noqa: E501
return data
def admins_post_with_http_info(self, body, **kwargs):  # noqa: E501
    """Create an admin (full HTTP info variant).

    Synchronous by default; pass async_req=True to get a request
    thread back instead of the deserialized result::

        >>> thread = api.admins_post_with_http_info(body, async_req=True)
        >>> result = thread.get()

    :param async_req bool
    :param AdminPayload body: (required)
    :return: AdminRecord
        If the method is called asynchronously,
        returns the request thread.
    """
    # Reject any keyword argument this endpoint does not understand.
    accepted = (
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method admins_post" % key
            )
    # verify the required parameter 'body' is set
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `admins_post`")  # noqa: E501

    # HTTP headers: negotiate both Accept and Content-Type as JSON.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    # Authentication setting
    auth_settings = ['BasicAuth', 'BearerToken']  # noqa: E501
    return self.api_client.call_api(
        '/admins', 'POST',
        {},  # path_params: none for this endpoint
        [],  # query_params: none for this endpoint
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='AdminRecord',  # noqa: E501
        auth_settings=auth_settings,
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
| 36.849341
| 125
| 0.596412
| 2,262
| 19,567
| 4.895225
| 0.074713
| 0.044252
| 0.029351
| 0.032512
| 0.932448
| 0.91249
| 0.904814
| 0.890906
| 0.885758
| 0.87745
| 0
| 0.015969
| 0.308734
| 19,567
| 530
| 126
| 36.918868
| 0.802676
| 0.301528
| 0
| 0.768421
| 0
| 0
| 0.176756
| 0.034874
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038596
| false
| 0
| 0.014035
| 0
| 0.108772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07327704c291a8ca3e231ffd1369f84eebda10c4
| 16,203
|
py
|
Python
|
precise/skaters/managers/schurmanagers.py
|
microprediction/precise
|
0aa7c69c3c280926cec03fb6fc0934a6193da440
|
[
"MIT"
] | 40
|
2022-01-13T00:40:59.000Z
|
2022-03-31T20:33:19.000Z
|
precise/skaters/managers/schurmanagers.py
|
microprediction/precise
|
0aa7c69c3c280926cec03fb6fc0934a6193da440
|
[
"MIT"
] | 14
|
2022-01-08T16:00:12.000Z
|
2022-03-16T00:12:04.000Z
|
precise/skaters/managers/schurmanagers.py
|
microprediction/precise
|
0aa7c69c3c280926cec03fb6fc0934a6193da440
|
[
"MIT"
] | 9
|
2022-01-26T21:14:43.000Z
|
2022-03-21T17:32:02.000Z
|
from precise.skaters.managers.schurmanagerfactory import schur_weak_weak_pm_manager_factory, schur_weak_weak_ewa_manager_factory, \
schur_diag_weak_pm_manager_factory, schur_vol_vol_ewa_manager_factory, schur_weak_vol_ewa_manager_factory, schur_diag_diag_ewa_manager_factory
from precise.skaters.managers.buyandholdfactory import buy_and_hold
# When True, the j=5 / j=20 manager lists at the bottom of this module are
# populated by wrapping the j=1 managers; when False they are left empty.
USE_JS = False

# gamma = 1.0
# r=0.025...
# Thin named wrappers around the schur_* manager factories.  The tokens in
# each name mirror the keyword arguments passed below (e.g. r025 -> r=0.025,
# n50 -> n_emp=50, g100 -> gamma=1.0).  Only k=1 is supported.
def schur_weak_weak_pm_t0_r025_n50_s5_g100_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.025, n_emp=50, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_pm_t0_r050_n25_s5_g100_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.050, n_emp=25, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r025_n50_s5_g100_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.025, n_emp=50, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r050_n25_s5_g100_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, gamma=1.0, delta=0.0, j=j)
### Same but with weak entropish and larger sub-portfolios
# ... first h=5.0
# As above but with default s=25 and an extra h=5.0 factory argument.
def schur_weak_weak_pm_t0_r025_n50_s25_g100_h500_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.025, n_emp=50, h=5.0, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_pm_t0_r050_n25_s25_g100_h500_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.050, n_emp=25, h=5.0, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r025_n50_s25_g100_h500_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.025, n_emp=50, h=5.0, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r050_n25_s25_g100_h500_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, h=5.0, gamma=1.0, delta=0.0, j=j)


# Convenience bundle of the four s=25, h=5.0, gamma=1.0 managers above.
SCHUR_GAMMA_100_S25_H500_LONG_MANAGERS = [schur_weak_weak_pm_t0_r025_n50_s25_g100_h500_long_manager, schur_weak_weak_pm_t0_r050_n25_s25_g100_h500_long_manager,
                                          schur_weak_weak_ewa_r025_n50_s25_g100_h500_long_manager, schur_weak_weak_ewa_r050_n25_s25_g100_h500_long_manager]
### Same but with weak entropish and larger sub-portfolios and s=100
# ... first h=5.0
# Identical to the s=25 h=5.0 group above except for the default s=100.
def schur_weak_weak_pm_t0_r025_n50_s100_g100_h500_long_manager(y, s=100, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.025, n_emp=50, h=5.0, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_pm_t0_r050_n25_s100_g100_h500_long_manager(y, s=100, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.050, n_emp=25, h=5.0, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r025_n50_s100_g100_h500_long_manager(y, s=100, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.025, n_emp=50, h=5.0, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r050_n25_s100_g100_h500_long_manager(y, s=100, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, h=5.0, gamma=1.0, delta=0.0, j=j)


# Convenience bundle of the four s=100, h=5.0, gamma=1.0 managers above.
SCHUR_GAMMA_100_S100_H500_LONG_MANAGERS = [schur_weak_weak_pm_t0_r025_n50_s100_g100_h500_long_manager,
                                           schur_weak_weak_pm_t0_r050_n25_s100_g100_h500_long_manager,
                                           schur_weak_weak_ewa_r025_n50_s100_g100_h500_long_manager,
                                           schur_weak_weak_ewa_r050_n25_s100_g100_h500_long_manager]
# ... then h=1.5
# Same s=25 group as above but with h=1.5.
def schur_weak_weak_pm_t0_r025_n50_s25_g100_h150_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.025, n_emp=50, h=1.5, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_pm_t0_r050_n25_s25_g100_h150_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    # FIX: previously passed h=1.0, contradicting the h150 token in the name
    # and all three sibling h150 managers, which use h=1.5.
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.050, n_emp=25, h=1.5, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r025_n50_s25_g100_h150_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.025, n_emp=50, h=1.5, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r050_n25_s25_g100_h150_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, h=1.5, gamma=1.0, delta=0.0, j=j)


# Convenience bundle of the four s=25, h=1.5, gamma=1.0 managers above.
SCHUR_GAMMA_100_H150_LONG_MANAGERS = [schur_weak_weak_pm_t0_r025_n50_s25_g100_h150_long_manager,
                                      schur_weak_weak_pm_t0_r050_n25_s25_g100_h150_long_manager,
                                      schur_weak_weak_ewa_r025_n50_s25_g100_h150_long_manager,
                                      schur_weak_weak_ewa_r050_n25_s25_g100_h150_long_manager]
# ... then h=1.25
# Same s=25 group again but with h=1.25.
def schur_weak_weak_pm_t0_r025_n50_s25_g100_h125_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.025, n_emp=50, h=1.25, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_pm_t0_r050_n25_s25_g100_h125_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.050, n_emp=25, h=1.25, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r025_n50_s25_g100_h125_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.025, n_emp=50, h=1.25, gamma=1.0, delta=0.0, j=j)


def schur_weak_weak_ewa_r050_n25_s25_g100_h125_long_manager(y, s=25, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, h=1.25, gamma=1.0, delta=0.0, j=j)


# Convenience bundle of the four s=25, h=1.25, gamma=1.0 managers above.
SCHUR_GAMMA_100_H125_LONG_MANAGERS = [schur_weak_weak_pm_t0_r025_n50_s25_g100_h125_long_manager,
                                      schur_weak_weak_pm_t0_r050_n25_s25_g100_h125_long_manager,
                                      schur_weak_weak_ewa_r025_n50_s25_g100_h125_long_manager,
                                      schur_weak_weak_ewa_r050_n25_s25_g100_h125_long_manager]
#-# r=0.05 ...
# gamma=1.0 variants built from the vol/vol, weak/vol, diag/diag and
# diag/weak factories, all with n_split=5.
def schur_weak_vol_ewa_r050_n25_s5_g100_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_vol_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=1.0, delta=0.0, j=j)


def schur_vol_vol_ewa_r050_n25_s5_g100_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_vol_vol_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=1.0, delta=0.0, j=j)


def schur_diag_diag_ewa_r050_n25_s5_g100_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_diag_diag_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=1.0, delta=0.0, j=j)


def schur_diag_weak_pm_t0_r050_n25_s5_g100_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_diag_weak_pm_manager_factory(y=y, s=s, e=e, r=0.05, target=0, n_emp=25, n_split=5, gamma=1.0, delta=0.0, j=j)
# Aggregate gamma=1.0 lists.  The h-parameterized ("entropish") groups go in
# one bucket, the plain s=5 managers in the other.
SCHUR_GAMMA_100_ENTROPISH_LONG_MANAGERS = SCHUR_GAMMA_100_H125_LONG_MANAGERS + \
                                          SCHUR_GAMMA_100_H150_LONG_MANAGERS + \
                                          SCHUR_GAMMA_100_S25_H500_LONG_MANAGERS + SCHUR_GAMMA_100_S100_H500_LONG_MANAGERS
# NOTE(review): "ENTROPOSH" looks like a typo for "ENTROPISH", but the name is
# kept as-is since external callers may import it.
SCHUR_GAMMA_100_NON_ENTROPOSH_LONG_MANAGERS = [schur_weak_weak_pm_t0_r025_n50_s5_g100_long_manager,
                                               schur_weak_weak_pm_t0_r050_n25_s5_g100_long_manager,
                                               schur_weak_weak_ewa_r025_n50_s5_g100_long_manager,
                                               schur_weak_weak_ewa_r050_n25_s5_g100_long_manager,
                                               schur_weak_vol_ewa_r050_n25_s5_g100_long_manager,
                                               schur_vol_vol_ewa_r050_n25_s5_g100_long_manager,
                                               schur_diag_diag_ewa_r050_n25_s5_g100_long_manager,
                                               schur_diag_weak_pm_t0_r050_n25_s5_g100_long_manager]
SCHUR_GAMMA_100_LONG_MANAGERS = SCHUR_GAMMA_100_ENTROPISH_LONG_MANAGERS + SCHUR_GAMMA_100_NON_ENTROPOSH_LONG_MANAGERS
# r=0.025...
# gamma=0.5 counterparts of the s=5 managers above.
def schur_weak_weak_pm_t0_r025_n50_s5_g050_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.025, n_emp=50, gamma=0.5, delta=0.0, j=j)


def schur_weak_weak_pm_t0_r050_n25_s5_g050_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.050, n_emp=25, gamma=0.5, delta=0.0, j=j)


def schur_weak_weak_ewa_r025_n50_s5_g050_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.025, n_emp=50, gamma=0.5, delta=0.0, j=j)


def schur_weak_weak_ewa_r050_n25_s5_g050_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, gamma=0.5, delta=0.0, j=j)


#-# # r=0.05...
def schur_weak_vol_ewa_r050_n25_s5_g050_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_vol_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0.5, delta=0.0, j=j)


def schur_vol_vol_ewa_r050_n25_s5_g050_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_vol_vol_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0.5, delta=0.0, j=j)


def schur_diag_diag_ewa_r050_n25_s5_g050_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_diag_diag_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0.5, delta=0.0, j=j)


def schur_diag_weak_pm_t0_r050_n25_s5_g050_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_diag_weak_pm_manager_factory(y=y, s=s, e=e, r=0.05, target=0, n_emp=25, n_split=5, gamma=0.5, delta=0.0, j=j)


# All eight gamma=0.5 managers.
SCHUR_GAMMA_050_LONG_MANAGERS = [schur_weak_weak_pm_t0_r025_n50_s5_g050_long_manager,
                                 schur_weak_weak_pm_t0_r050_n25_s5_g050_long_manager,
                                 schur_weak_weak_ewa_r025_n50_s5_g050_long_manager,
                                 schur_weak_weak_ewa_r050_n25_s5_g050_long_manager,
                                 schur_weak_vol_ewa_r050_n25_s5_g050_long_manager,
                                 schur_vol_vol_ewa_r050_n25_s5_g050_long_manager,
                                 schur_diag_diag_ewa_r050_n25_s5_g050_long_manager,
                                 schur_diag_weak_pm_t0_r050_n25_s5_g050_long_manager]
# gamma=0.1 counterparts of the s=5 managers above.
def schur_weak_weak_pm_t0_r025_n50_s5_g010_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.025, n_emp=50, gamma=0.1, delta=0.0, j=j)


def schur_weak_weak_pm_t0_r050_n25_s5_g010_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.050, n_emp=25, gamma=0.1, delta=0.0, j=j)


def schur_weak_weak_ewa_r025_n50_s5_g010_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.025, n_emp=50, gamma=0.1, delta=0.0, j=j)


def schur_weak_weak_ewa_r050_n25_s5_g010_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, gamma=0.1, delta=0.0, j=j)


#-#
def schur_weak_vol_ewa_r050_n25_s5_g010_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_vol_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0.1, delta=0.0, j=j)


def schur_vol_vol_ewa_r050_n25_s5_g010_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_vol_vol_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0.1, delta=0.0, j=j)


def schur_diag_diag_ewa_r050_n25_s5_g010_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_diag_diag_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0.1, delta=0.0, j=j)


def schur_diag_weak_pm_t0_r050_n25_s5_g010_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_diag_weak_pm_manager_factory(y=y, s=s, e=e, r=0.05, target=0, n_emp=25, n_split=5, gamma=0.1, delta=0.0, j=j)


# All eight gamma=0.1 managers.
SCHUR_GAMMA_010_LONG_MANAGERS = [schur_weak_weak_pm_t0_r025_n50_s5_g010_long_manager,
                                 schur_weak_weak_pm_t0_r050_n25_s5_g010_long_manager,
                                 schur_weak_weak_ewa_r025_n50_s5_g010_long_manager,
                                 schur_weak_weak_ewa_r050_n25_s5_g010_long_manager,
                                 schur_weak_vol_ewa_r050_n25_s5_g010_long_manager,
                                 schur_vol_vol_ewa_r050_n25_s5_g010_long_manager,
                                 schur_diag_diag_ewa_r050_n25_s5_g010_long_manager,
                                 schur_diag_weak_pm_t0_r050_n25_s5_g010_long_manager]
# gamma=0 counterparts of the s=5 managers above.
def schur_weak_weak_pm_t0_r025_n50_s5_g000_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.025, n_emp=50, gamma=0, delta=0.0, j=j)


def schur_weak_weak_pm_t0_r050_n25_s5_g000_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_pm_manager_factory(y=y, s=s, target=0, e=e, r=0.050, n_emp=25, gamma=0, delta=0.0, j=j)


def schur_weak_weak_ewa_r025_n50_s5_g000_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.025, n_emp=50, gamma=0, delta=0.0, j=j)


def schur_weak_weak_ewa_r050_n25_s5_g000_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_weak_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, gamma=0, delta=0.0, j=j)


#-#
def schur_weak_vol_ewa_r050_n25_s5_g000_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_weak_vol_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0, delta=0.0, j=j)


def schur_vol_vol_ewa_r050_n25_s5_g000_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_vol_vol_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0, delta=0.0, j=j)


def schur_diag_diag_ewa_r050_n25_s5_g000_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_diag_diag_ewa_manager_factory(y=y, s=s, e=e, r=0.05, n_emp=25, n_split=5, gamma=0, delta=0.0, j=j)


def schur_diag_weak_pm_t0_r050_n25_s5_g000_long_manager(y, s, k=1, e=1, j=1):
    assert k == 1
    return schur_diag_weak_pm_manager_factory(y=y, s=s, e=e, r=0.05, target=0, n_emp=25, n_split=5, gamma=0, delta=0.0, j=j)


# All eight gamma=0 managers.
SCHUR_GAMMA_000_LONG_MANAGERS = [schur_weak_weak_pm_t0_r025_n50_s5_g000_long_manager,
                                 schur_weak_weak_pm_t0_r050_n25_s5_g000_long_manager,
                                 schur_weak_weak_ewa_r025_n50_s5_g000_long_manager,
                                 schur_weak_weak_ewa_r050_n25_s5_g000_long_manager,
                                 schur_weak_vol_ewa_r050_n25_s5_g000_long_manager,
                                 schur_vol_vol_ewa_r050_n25_s5_g000_long_manager,
                                 schur_diag_diag_ewa_r050_n25_s5_g000_long_manager,
                                 schur_diag_weak_pm_t0_r050_n25_s5_g000_long_manager]
# Final exported lists: every j=1 manager defined above, plus optional
# slower-trading (j=5, j=20) wrapped variants when USE_JS is enabled.
SCHUR_J1_LONG_MANAGERS = SCHUR_GAMMA_100_LONG_MANAGERS + SCHUR_GAMMA_050_LONG_MANAGERS + SCHUR_GAMMA_010_LONG_MANAGERS + SCHUR_GAMMA_000_LONG_MANAGERS
SCHUR_LS_MANAGERS = []  # no long/short managers defined in this module
# Remark: Functions not defined at top level don't always play nice with multiprocessing
if USE_JS:
    # NOTE(review): buy_and_hold is presumably a wrapper that slows trading
    # to every j-th step -- confirm against buyandholdfactory.
    SCHUR_J5_LONG_MANAGERS = [buy_and_hold(mgr, j=5) for mgr in SCHUR_J1_LONG_MANAGERS]
    SCHUR_J20_LONG_MANAGERS = [buy_and_hold(mgr, j=20) for mgr in SCHUR_J1_LONG_MANAGERS]
else:
    SCHUR_J5_LONG_MANAGERS = []
    SCHUR_J20_LONG_MANAGERS = []
SCHUR_LONG_MANAGERS = SCHUR_J1_LONG_MANAGERS + SCHUR_J5_LONG_MANAGERS + SCHUR_J20_LONG_MANAGERS
SCHUR_MANAGERS = SCHUR_LONG_MANAGERS + SCHUR_LS_MANAGERS
| 45.386555
| 159
| 0.702154
| 3,362
| 16,203
| 2.963117
| 0.029149
| 0.100281
| 0.127886
| 0.07378
| 0.976912
| 0.966171
| 0.939872
| 0.916382
| 0.909356
| 0.877033
| 0
| 0.145534
| 0.183238
| 16,203
| 357
| 160
| 45.386555
| 0.607224
| 0.020367
| 0
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.230769
| false
| 0
| 0.009615
| 0
| 0.471154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07344d2471c05eac78b3615f9cd0f0e9c80c75cc
| 20,255
|
py
|
Python
|
hardware/tests/test_sense_pi.py
|
ab7289/mercury-hardware
|
dc2a4e888184a32aaa1355a1fe9ec77a9cb15ebe
|
[
"MIT"
] | 1
|
2020-05-09T21:37:12.000Z
|
2020-05-09T21:37:12.000Z
|
hardware/tests/test_sense_pi.py
|
ab7289/mercury-hardware
|
dc2a4e888184a32aaa1355a1fe9ec77a9cb15ebe
|
[
"MIT"
] | 8
|
2020-05-07T01:54:14.000Z
|
2020-05-13T21:31:56.000Z
|
hardware/tests/test_sense_pi.py
|
ab7289/mercury-hardware
|
dc2a4e888184a32aaa1355a1fe9ec77a9cb15ebe
|
[
"MIT"
] | 2
|
2020-05-06T22:24:20.000Z
|
2020-05-13T20:32:29.000Z
|
import unittest
from unittest.mock import patch, MagicMock
from testfixtures import TempDirectory
import os
import sys
# Stub the Raspberry Pi hardware modules before importing SensePi so the
# test suite runs on machines without the sense-hat libraries.
sys.modules[
    "sense_hat"
] = MagicMock()  # mock these modules so that they don't have to be installed
sys.modules["sense_emu"] = MagicMock()
from hardware.SensorPi.sense_pi import SensePi # noqa : E402
from hardware.Utils.logger import Logger # noqa : E402
@patch("hardware.SensorPi.sense_pi.SenseHat")
class SensePiTests(unittest.TestCase):
def setUp(self):
    # Fresh temporary directory per test for the log files.
    self.temp_dir = TempDirectory()
def tearDown(self):
    # Remove the per-test temporary directory.
    self.temp_dir.cleanup()
def test_init_no_logs_no_ids(self, mock_sense):
    # SensePi() with no arguments: default sensor ids and the
    # SENSE_HAT_LOG_FILE env var are used, and the splash message is shown.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    expected_sensor_ids = {
        "temperature": 1,
        "pressure": 2,
        "humidity": 3,
        "acceleration": 4,
        "orientation": 5,
    }
    expected_color = (87, 46, 140)
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        mock_show_message.assert_called_with(
            "MERCURY", text_colour=expected_color, scroll_speed=0.04
        )
        mock_clear.assert_called()
        self.assertTrue(sense.logging is not None)
        self.assertTrue(sense.logging.name == "SENSE_HAT_LOG_FILE")
        self.assertIsInstance(sense.logging, Logger)
        self.assertDictEqual(sense.sensor_ids, expected_sensor_ids)
def test_init_logs_ids(self, mock_sense):
    # Both a custom log file name and custom sensor ids are honoured.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    expected_sensor_ids = {
        "temperature": 5,
        "pressure": 4,
        "humidity": 3,
        "acceleration": 2,
        "orientation": 1,
    }
    expected_color = (87, 46, 140)
    with patch.dict(
        os.environ, {"SENSE_LOG": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path}
    ):
        sense = SensePi(log_file_name="SENSE_LOG", sensor_ids=expected_sensor_ids)
        mock_show_message.assert_called_with(
            "MERCURY", text_colour=expected_color, scroll_speed=0.04
        )
        mock_clear.assert_called()
        self.assertTrue(sense.logging is not None)
        self.assertTrue(sense.logging.name == "SENSE_LOG")
        self.assertIsInstance(sense.logging, Logger)
        self.assertDictEqual(sense.sensor_ids, expected_sensor_ids)
def test_init_no_logs_ids(self, mock_sense):
    # Custom sensor ids with the default log file name.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    expected_sensor_ids = {
        "temperature": 5,
        "pressure": 4,
        "humidity": 3,
        "acceleration": 2,
        "orientation": 1,
    }
    expected_color = (87, 46, 140)
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi(sensor_ids=expected_sensor_ids)
        mock_show_message.assert_called_with(
            "MERCURY", text_colour=expected_color, scroll_speed=0.04
        )
        mock_clear.assert_called()
        self.assertTrue(sense.logging is not None)
        self.assertTrue(sense.logging.name == "SENSE_HAT_LOG_FILE")
        self.assertIsInstance(sense.logging, Logger)
        self.assertDictEqual(sense.sensor_ids, expected_sensor_ids)
def test_init_logs_no_ids(self, mock_sense):
    # Custom log file name with the default sensor ids.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    expected_sensor_ids = {
        "temperature": 1,
        "pressure": 2,
        "humidity": 3,
        "acceleration": 4,
        "orientation": 5,
    }
    expected_color = (87, 46, 140)
    with patch.dict(
        os.environ, {"SENSE_LOG": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path}
    ):
        sense = SensePi(log_file_name="SENSE_LOG")
        mock_show_message.assert_called_with(
            "MERCURY", text_colour=expected_color, scroll_speed=0.04
        )
        mock_clear.assert_called()
        self.assertTrue(sense.logging is not None)
        self.assertTrue(sense.logging.name == "SENSE_LOG")
        self.assertIsInstance(sense.logging, Logger)
        self.assertDictEqual(sense.sensor_ids, expected_sensor_ids)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_factory_temp(self, mock_date, mock_sense):
    # factory("TEMPERATURE") returns id 1 with only the temperature value.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.factory("TEMPERATURE")
        expected_data = {
            "sensor_id": 1,
            "values": {"temperature": "100"},
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_factory_pressure(self, mock_date, mock_sense):
    # factory("PRESSURE") returns id 2 with only the pressure value.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.factory("PRESSURE")
        expected_data = {
            "sensor_id": 2,
            "values": {"pressure": "50"},
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_factory_humidity(self, mock_date, mock_sense):
    # factory("HUMIDITY") returns id 3 with only the humidity value.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.factory("HUMIDITY")
        expected_data = {
            "sensor_id": 3,
            "values": {"humidity": "20"},
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_factory_accel(self, mock_date, mock_sense):
    # factory("ACCELERATION") returns id 4 with only the acceleration value.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.factory("ACCELERATION")
        expected_data = {
            "sensor_id": 4,
            "values": {"acceleration": "20"},
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_factory_orientation(self, mock_date, mock_sense):
    # factory("ORIENTATION") returns id 5 with only the orientation tuple.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.factory("ORIENTATION")
        expected_data = {
            "sensor_id": 5,
            "values": {"orientation": (1, 1, 1)},
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_factory_invalid_key(self, mock_date, mock_sense):
    # An unknown key yields an empty dict rather than raising.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.factory("SOME_KEY")
        expected_data = {}
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_factory_all(self, mock_date, mock_sense):
    # factory("ALL") returns every sensor value and no sensor_id key.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.factory("ALL")
        expected_data = {
            "values": {
                "temperature": "100",
                "pressure": "50",
                "humidity": "20",
                "acceleration": "20",
                "orientation": (1, 1, 1),
            },
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_sense_get_all(self, mock_date, mock_sense):
    # get_all() matches factory("ALL"): every value, no sensor_id key.
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.get_all()
        expected_data = {
            "values": {
                "temperature": "100",
                "pressure": "50",
                "humidity": "20",
                "acceleration": "20",
                "orientation": (1, 1, 1),
            },
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_sense_get_temp(self, mock_date, mock_sense):
    # get_temperature() matches factory("TEMPERATURE").
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.get_temperature()
        expected_data = {
            "sensor_id": 1,
            "values": {"temperature": "100"},
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_sense_get_pressure(self, mock_date, mock_sense):
    # get_pressure() matches factory("PRESSURE").
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.get_pressure()
        expected_data = {
            "sensor_id": 2,
            "values": {"pressure": "50"},
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_sense_get_humidity(self, mock_date, mock_sense):
    # get_humidity() matches factory("HUMIDITY").
    mock_show_message = MagicMock()
    mock_clear = MagicMock()
    mock_sense.return_value.show_message = mock_show_message
    mock_sense.return_value.clear = mock_clear
    mock_sense.return_value.get_temperature.return_value = "100"
    mock_sense.return_value.get_pressure.return_value = "50"
    mock_sense.return_value.get_humidity.return_value = "20"
    mock_sense.return_value.get_accelerometer_raw.return_value = "20"
    mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
    date_str = "example_date"
    mock_date.return_value = date_str
    with patch.dict(
        os.environ,
        {"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
    ):
        sense = SensePi()
        data = sense.get_humidity()
        expected_data = {
            "sensor_id": 3,
            "values": {"humidity": "20"},
            "date": date_str,
        }
        self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_sense_get_accel(self, mock_date, mock_sense):
mock_show_message = MagicMock()
mock_clear = MagicMock()
mock_sense.return_value.show_message = mock_show_message
mock_sense.return_value.clear = mock_clear
mock_sense.return_value.get_temperature.return_value = "100"
mock_sense.return_value.get_pressure.return_value = "50"
mock_sense.return_value.get_humidity.return_value = "20"
mock_sense.return_value.get_accelerometer_raw.return_value = "20"
mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
date_str = "example_date"
mock_date.return_value = date_str
with patch.dict(
os.environ,
{"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
):
sense = SensePi()
data = sense.get_acceleration()
expected_data = {
"sensor_id": 4,
"values": {"acceleration": "20"},
"date": date_str,
}
self.assertDictEqual(expected_data, data)
@patch("hardware.SensorPi.sense_pi.date_str_with_current_timezone")
def test_sense_get_orientation(self, mock_date, mock_sense):
mock_show_message = MagicMock()
mock_clear = MagicMock()
mock_sense.return_value.show_message = mock_show_message
mock_sense.return_value.clear = mock_clear
mock_sense.return_value.get_temperature.return_value = "100"
mock_sense.return_value.get_pressure.return_value = "50"
mock_sense.return_value.get_humidity.return_value = "20"
mock_sense.return_value.get_accelerometer_raw.return_value = "20"
mock_sense.return_value.get_orientation.return_value = (1, 1, 1)
date_str = "example_date"
mock_date.return_value = date_str
with patch.dict(
os.environ,
{"SENSE_HAT_LOG_FILE": "logger.txt", "LOG_DIRECTORY": self.temp_dir.path},
):
sense = SensePi()
data = sense.get_orientation()
expected_data = {
"sensor_id": 5,
"values": {"orientation": (1, 1, 1)},
"date": date_str,
}
self.assertDictEqual(expected_data, data)
if __name__ == "__main__":
unittest.main()
| 35.786219
| 88
| 0.620785
| 2,340
| 20,255
| 4.998291
| 0.049573
| 0.157062
| 0.120554
| 0.160739
| 0.961098
| 0.957421
| 0.957421
| 0.957421
| 0.957421
| 0.957421
| 0
| 0.018381
| 0.280178
| 20,255
| 565
| 89
| 35.849558
| 0.783813
| 0.004048
| 0
| 0.832215
| 0
| 0
| 0.126531
| 0.038475
| 0
| 0
| 0
| 0
| 0.082774
| 1
| 0.042506
| false
| 0
| 0.01566
| 0
| 0.060403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
074a2f623048b42ea55b6f1c6854e3e2249783e5
| 3,342
|
py
|
Python
|
noise/simple_signal_regression_limited_noise/fitness_data_reg_limit_5s/fitness_trend.py
|
FraLotito/evol-signal-comm
|
fd06bdad06200a65a8910e8401f0de7632be3cf0
|
[
"BSD-3-Clause"
] | 4
|
2021-07-14T11:16:44.000Z
|
2022-03-11T12:50:47.000Z
|
simple_signal/simple_signal_regression_limited/fitness_data_reg_limit_5s/fitness_trend.py
|
FraLotito/evol-signal-comm
|
fd06bdad06200a65a8910e8401f0de7632be3cf0
|
[
"BSD-3-Clause"
] | null | null | null |
simple_signal/simple_signal_regression_limited/fitness_data_reg_limit_5s/fitness_trend.py
|
FraLotito/evol-signal-comm
|
fd06bdad06200a65a8910e8401f0de7632be3cf0
|
[
"BSD-3-Clause"
] | null | null | null |
import graphviz
import matplotlib.pyplot as plt
import numpy as np
NUMBER_OF_GENERATIONS = 200


def _load_generation_stats(filename, n_generations=NUMBER_OF_GENERATIONS):
    """Read one fitness trace file and return per-generation (means, stddevs).

    Each line of *filename* is one run: whitespace-separated floats, one per
    generation.  Values beyond *n_generations* are ignored.  Generations with
    no data (runs shorter than *n_generations*) yield NaN, matching the
    original script's behaviour.
    """
    with open(filename, "r") as fh:
        runs = fh.readlines()
    per_gen = [[] for _ in range(n_generations)]
    for run in runs:
        values = list(map(float, run.split()))[:n_generations]
        for i, value in enumerate(values):
            per_gen[i].append(value)
    means = [np.mean(g) for g in per_gen]
    stddevs = [np.std(g) for g in per_gen]
    return means, stddevs


def _band(means, stddevs):
    """Return the (lower, upper) one-stddev envelope around *means*."""
    lower = [m - s for m, s in zip(means, stddevs)]
    upper = [m + s for m, s in zip(means, stddevs)]
    return lower, upper


gen = range(NUMBER_OF_GENERATIONS)

# Sender population: average fitness and best-pair fitness across runs.
mean_gen, stddev_gen = _load_generation_stats("sender.mean")
mean_best_gen, stddev_best_gen = _load_generation_stats("sender.best")

plt.plot(gen, mean_best_gen, color='green', label='best pair avg')
lo, hi = _band(mean_best_gen, stddev_best_gen)
plt.fill_between(gen, lo, hi, alpha=0.35, color='green')
plt.plot(gen, mean_gen, color='red', label='senders population avg')
lo, hi = _band(mean_gen, stddev_gen)
plt.fill_between(gen, lo, hi, alpha=0.35, color='red')

# Receiver population: only the population average is plotted; the best-pair
# trace is loaded (as in the original script) but not drawn.
mean_gen, stddev_gen = _load_generation_stats("receiver.mean")
mean_best_gen, stddev_best_gen = _load_generation_stats("receiver.best")

plt.plot(gen, mean_gen, color='blue', label='receivers population avg')
lo, hi = _band(mean_gen, stddev_gen)
plt.fill_between(gen, lo, hi, alpha=0.35, color='blue')

plt.xlabel("Generations")
plt.xticks(range(NUMBER_OF_GENERATIONS + 1))
plt.ylabel("Populations' avg & best pair fitness")
locs, labs = plt.xticks()
plt.xticks(locs[::25])  # keep every 25th tick label to avoid clutter
plt.legend(loc="best")
plt.savefig('fitness_data_reg_limit_5s.pdf')
plt.show()
| 28.564103
| 178
| 0.689408
| 589
| 3,342
| 3.696095
| 0.144312
| 0.088195
| 0.033073
| 0.022049
| 0.801562
| 0.79559
| 0.78181
| 0.78181
| 0.764355
| 0.728525
| 0
| 0.0077
| 0.145123
| 3,342
| 116
| 179
| 28.810345
| 0.754288
| 0.11161
| 0
| 0.744186
| 0
| 0
| 0.072513
| 0.009781
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034884
| 0
| 0.034884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4af2c49e953080fe8fbb31d15698f98b266a333a
| 172
|
py
|
Python
|
example/__init__.py
|
FebruaryBreeze/yaml-to-object
|
d91206810b4828533c6d78e130cd1e20025019c7
|
[
"MIT"
] | 3
|
2019-02-21T06:39:15.000Z
|
2019-05-14T08:39:53.000Z
|
example/__init__.py
|
FebruaryBreeze/yaml-to-object
|
d91206810b4828533c6d78e130cd1e20025019c7
|
[
"MIT"
] | null | null | null |
example/__init__.py
|
FebruaryBreeze/yaml-to-object
|
d91206810b4828533c6d78e130cd1e20025019c7
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import yaml_to_object
# Generate code from the sibling example.yml (suffix 'config'); when generate()
# returns truthy — presumably meaning a build module was produced; confirm
# against yaml_to_object's docs — re-export its contents from this package.
if yaml_to_object.generate(Path(__file__).parent / 'example.yml', suffix='config'):
    from .build import * # noqa: F403
| 24.571429
| 83
| 0.744186
| 25
| 172
| 4.8
| 0.72
| 0.1
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0.145349
| 172
| 6
| 84
| 28.666667
| 0.795918
| 0.05814
| 0
| 0
| 1
| 0
| 0.10625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ab0ea5dbddc030e88f2055a27f2aa7912907a8b0
| 3,903
|
py
|
Python
|
tests/data/hash/test_hash.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 2
|
2021-04-28T10:46:08.000Z
|
2021-12-22T12:33:34.000Z
|
tests/data/hash/test_hash.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 321
|
2020-06-15T11:48:21.000Z
|
2022-03-29T22:13:33.000Z
|
tests/data/hash/test_hash.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 4
|
2020-06-18T06:19:29.000Z
|
2021-07-14T12:54:47.000Z
|
# coding: utf-8
import hashlib
import pytest
import os
import tempfile
from jumpscale.loader import j
@pytest.fixture
def make_list():
    """Fixture: names of the fixed-digest hash algorithms under test."""
    return [
        "md5",
        "sha1",
        "sha224",
        "sha256",
        "sha384",
        "sha512",
        "sha3_224",
        "sha3_256",
        "sha3_384",
        "sha3_512",
        "blake2s",
        "blake2b",
    ]
def test_hash_strings():
    """Each j.data.hash helper must match hashlib's digest for str input."""
    sample = "my company is codescalers"
    fixed_digest = (
        "md5", "sha1", "sha224", "sha256", "sha384", "sha512",
        "sha3_224", "sha3_256", "sha3_384", "sha3_512",
        "blake2b", "blake2s",
    )
    for name in fixed_digest:
        expected = getattr(hashlib, name)(sample.encode()).hexdigest()
        assert getattr(j.data.hash, name)(sample) == expected
    # shake_* are variable-length; both sides use a 16-byte digest.
    for name in ("shake_128", "shake_256"):
        expected = getattr(hashlib, name)(sample.encode()).hexdigest(16)
        assert getattr(j.data.hash, name)(sample) == expected
def test_hash_bytes():
    """Each j.data.hash helper must match hashlib's digest for bytes input."""
    sample = b"hi there"
    fixed_digest = (
        "md5", "sha1", "sha224", "sha256", "sha384", "sha512",
        "sha3_224", "sha3_256", "sha3_384", "sha3_512",
        "blake2b", "blake2s",
    )
    for name in fixed_digest:
        expected = getattr(hashlib, name)(sample).hexdigest()
        assert getattr(j.data.hash, name)(sample) == expected
    # shake_* are variable-length; both sides use a 16-byte digest.
    for name in ("shake_128", "shake_256"):
        expected = getattr(hashlib, name)(sample).hexdigest(16)
        assert getattr(j.data.hash, name)(sample) == expected
def test_hash_files(make_list):
    """hash_file must agree with hashlib for every algorithm in the fixture."""
    payload = "Your text goes here"
    with tempfile.NamedTemporaryFile("w") as handle:
        handle.write(payload)
        handle.flush()
        for algo in make_list:
            reference = hashlib.new(algo)
            reference.update(payload.encode(handle.encoding))
            assert j.data.hash.hash_file(handle.name, algo).hexdigest() == reference.hexdigest()
def test_hash_dir(make_list):
    """Every digest reported by hash_directory must match hashlib's."""
    sample = "عربى english"
    directory = tempfile.mkdtemp()
    with tempfile.NamedTemporaryFile("w", dir=directory) as handle:
        handle.write(sample)
        handle.flush()
        for algo in make_list:
            reference = hashlib.new(algo)
            reference.update(sample.encode())
            expected = reference.hexdigest()
            for digest in j.data.hash.hash_directory(directory, algo).values():
                assert digest == expected
| 44.862069
| 102
| 0.702024
| 548
| 3,903
| 4.80292
| 0.145985
| 0.220365
| 0.102584
| 0.165274
| 0.75304
| 0.731763
| 0.731763
| 0.731763
| 0.713526
| 0.704407
| 0
| 0.058414
| 0.153472
| 3,903
| 86
| 103
| 45.383721
| 0.738196
| 0.003331
| 0
| 0.082192
| 0
| 0
| 0.044753
| 0
| 0
| 0
| 0
| 0
| 0.410959
| 1
| 0.068493
| false
| 0
| 0.068493
| 0
| 0.150685
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ab54c644a3cce8562ca8b7ea333b5f24ab4a3726
| 47,121
|
py
|
Python
|
TEST3D/GUI/0010003_page_micro/junk.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 31
|
2015-04-01T15:59:36.000Z
|
2022-03-18T20:21:47.000Z
|
TEST3D/GUI/0010003_page_micro/junk.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 3
|
2015-02-06T19:30:24.000Z
|
2017-05-25T14:14:31.000Z
|
TEST3D/GUI/0010003_page_micro/junk.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 7
|
2015-01-23T15:19:22.000Z
|
2021-06-09T09:03:59.000Z
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
# We test how voxels selection groups are handled in a microstructure.
# Meaning what is the difference between Remove and Clear but also how can we provide some selections:
# graphicaly or by the Voxel Selection Page using a specific mehtod.
# This test is mainly directed to the way that the selections are handle through the voxels group.
import tests
findWidget('OOF3D').resize(550, 350)
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
# create a microstructure with loaded files
findWidget('OOF3D:Microstructure Page:NewFromFile').clicked()
checkpoint toplevel widget mapped Dialog-Load Image and create Microstructure
findWidget('Dialog-Load Image and create Microstructure').resize(401, 215)
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('.')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('..')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../.')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../..')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3D')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DS')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSa')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSan')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSand')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandb')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbo')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbox')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbox/')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbox/5')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbox/5c')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbox/5co')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbox/5col')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbox/5colo')
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('../../3DSandbox/5color')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Auto').clicked()
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('m')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('mi')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('mic')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('micr')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('micro')
findWidget('Dialog-Load Image and create Microstructure:gtk-ok').clicked()
findWidget('OOF3D Messages 1').resize(603, 200)
findWidget('OOF3D:Microstructure Page:Pane').set_position(159)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint active area status updated
checkpoint pixel page updated
checkpoint mesh bdy page updated
checkpoint microstructure page sensitized
checkpoint Field page sensitized
checkpoint meshable button set
checkpoint Materials page updated
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint microstructure page sensitized
checkpoint OOF.Microstructure.Create_From_ImageFile
# check that the new group button is sensitized
assert tests.sensitization1()
# open a graphics window
findMenu(findWidget('OOF3D:MenuBar'), 'Windows:Graphics:New').activate()
checkpoint Move Node toolbox info updated
findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane1').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane1').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
checkpoint toplevel widget mapped OOF3D Graphics 1
findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane1').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1').resize(1000, 800)
checkpoint OOF.Windows.Graphics.New
findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane1').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1').resize(1000, 800)
findWidget('OOF3D').resize(550, 350)
findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane1').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
# do some voxels selection
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:TBChooser'), 'Voxel Selection')
findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
findWidget('OOF3D Graphics 1:Pane0:Pane1').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 705))
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8490000000000e+02,y= 2.56900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.84900000000000e+02,y= 2.56900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.2100000000000e+02,y= 2.4900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.2100000000000e+02,y= 2.4900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4000000000000e+02,y= 2.4300000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4000000000000e+02,y= 2.4300000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7900000000000e+02,y= 2.4600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7900000000000e+02,y= 2.4600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5900000000000e+02,y= 2.4500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5900000000000e+02,y= 2.4500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.7700000000000e+02,y= 2.7000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.7700000000000e+02,y= 2.7000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8300000000000e+02,y= 2.9600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8300000000000e+02,y= 2.9600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0700000000000e+02,y= 3.1400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.0700000000000e+02,y= 3.1400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.3100000000000e+02,y= 3.3900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.3100000000000e+02,y= 3.3900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8800000000000e+02,y= 2.6600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8800000000000e+02,y= 2.6600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8200000000000e+02,y= 2.9600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8200000000000e+02,y= 2.9600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.6400000000000e+02,y= 3.2100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.6400000000000e+02,y= 3.2100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0600000000000e+02,y= 3.7000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.0600000000000e+02,y= 3.7000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.6200000000000e+02,y= 3.7200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.6200000000000e+02,y= 3.7200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8300000000000e+02,y= 3.9600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8300000000000e+02,y= 3.9600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8000000000000e+02,y= 4.2400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8000000000000e+02,y= 4.2400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8200000000000e+02,y= 4.5400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8200000000000e+02,y= 4.5400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0600000000000e+02,y= 4.5400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.0600000000000e+02,y= 4.5400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.3600000000000e+02,y= 4.5100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.3600000000000e+02,y= 4.5100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.6800000000000e+02,y= 4.4900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.6800000000000e+02,y= 4.4900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8800000000000e+02,y= 4.4900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8800000000000e+02,y= 4.4900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8600000000000e+02,y= 3.9500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8600000000000e+02,y= 3.9500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8600000000000e+02,y= 4.2700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8600000000000e+02,y= 4.2700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# # create a voxels group
# findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:New').clicked()
# checkpoint toplevel widget mapped Dialog-Create new voxel group
# findWidget('Dialog-Create new voxel group').resize(246, 67)
# findWidget('Dialog-Create new voxel group:gtk-ok').clicked()
# findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
# checkpoint meshable button set
# checkpoint meshable button set
# checkpoint microstructure page sensitized
# checkpoint skeleton selection page groups sensitized
# checkpoint OOF.PixelGroup.New
# checkpoint microstructure page sensitized
# checkpoint meshable button set
# # A group has been created in micro, but no voxels have been added
# assert tests.sensitization3()
# assert tests.meshableCheck(1)
# assert tests.voxelGroupListCheck('pixelgroup (0 voxels, meshable)')
# assert tests.selectedGroupCheck('pixelgroup (0 voxels, meshable)')
# # create another voxels group
# findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:New').clicked()
# checkpoint toplevel widget mapped Dialog-Create new voxel group
# findWidget('Dialog-Create new voxel group').resize(246, 67)
# findWidget('Dialog-Create new voxel group:gtk-ok').clicked()
# checkpoint meshable button set
# checkpoint meshable button set
# checkpoint microstructure page sensitized
# checkpoint skeleton selection page groups sensitized
# checkpoint OOF.PixelGroup.New
# # another group has been created but still no added voxels
# assert tests.meshableCheck(1)
# assert tests.voxelGroupListCheck('pixelgroup (0 voxels, meshable)', 'pixelgroup<2> (0 voxels, meshable)')
# assert tests.selectedGroupCheck('pixelgroup<2> (0 voxels, meshable)')
# # select the first created voxels group
# findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:GroupListScroll:GroupList').get_selection().select_path((0,))
# checkpoint microstructure page sensitized
# checkpoint meshable button set
# # add the voxel selection to the first created voxels group
# findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:Add').clicked()
# checkpoint meshable button set
# checkpoint meshable button set
# checkpoint microstructure page sensitized
# checkpoint OOF.PixelGroup.AddSelection
# # do some more voxels selection
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.0600000000000e+02,y= 3.2500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.0600000000000e+02,y= 3.2500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.3300000000000e+02,y= 3.5000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.3300000000000e+02,y= 3.5000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.0800000000000e+02,y= 3.7800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.0800000000000e+02,y= 3.7800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.5900000000000e+02,y= 3.7700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.5900000000000e+02,y= 3.7700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.6200000000000e+02,y= 3.2400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.6200000000000e+02,y= 3.2400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.8600000000000e+02,y= 3.0900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.8600000000000e+02,y= 3.0900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:TBScroll:Voxel Selection:Undo').clicked()
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Undo
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.8000000000000e+02,y= 3.0100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.8000000000000e+02,y= 3.0100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 5.1600000000000e+02,y= 2.9200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.1600000000000e+02,y= 2.9200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.8900000000000e+02,y= 3.9300000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.8900000000000e+02,y= 3.9300000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 5.1300000000000e+02,y= 3.9500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.1300000000000e+02,y= 3.9500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 5.3600000000000e+02,y= 3.9700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.3600000000000e+02,y= 3.9700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 5.3500000000000e+02,y= 2.9400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.3500000000000e+02,y= 2.9400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 5.4400000000000e+02,y= 3.1700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.4400000000000e+02,y= 3.1700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 5.4200000000000e+02,y= 3.4900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.4200000000000e+02,y= 3.4900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 5.4000000000000e+02,y= 3.7400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.4000000000000e+02,y= 3.7400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5700000000000e+02,y= 3.2200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5700000000000e+02,y= 3.2200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2000000000000e+02,y= 3.4300000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2000000000000e+02,y= 3.4300000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5000000000000e+02,y= 3.7700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5000000000000e+02,y= 3.7700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.0200000000000e+02,y= 3.6900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.0200000000000e+02,y= 3.6900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.9800000000000e+02,y= 3.2100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.9800000000000e+02,y= 3.2100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.7300000000000e+02,y= 2.9900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.7300000000000e+02,y= 2.9900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.8100000000000e+02,y= 3.9400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.8100000000000e+02,y= 3.9400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.5400000000000e+02,y= 4.0000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.5400000000000e+02,y= 4.0000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.2600000000000e+02,y= 3.9900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.2600000000000e+02,y= 3.9900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.5300000000000e+02,y= 2.9400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.5300000000000e+02,y= 2.9400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.2200000000000e+02,y= 2.9500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.2200000000000e+02,y= 2.9500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.2400000000000e+02,y= 3.7600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.2400000000000e+02,y= 3.7600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.2300000000000e+02,y= 3.4200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.2300000000000e+02,y= 3.4200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.2400000000000e+02,y= 3.1800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
# canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:Canvas'), windowname='Graphics_1')
# canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.2400000000000e+02,y= 3.1800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# # select the second voxels group
# findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:GroupListScroll:GroupList').get_selection().select_path((1,))
# checkpoint microstructure page sensitized
# checkpoint meshable button set
# # the first voxels group now contains the previously added voxel selection
# assert tests.meshableCheck(1)
# assert tests.voxelGroupListCheck('pixelgroup (23 voxels, meshable)', 'pixelgroup<2> (0 voxels, meshable)')
# assert tests.selectedGroupCheck('pixelgroup<2> (0 voxels, meshable)')
# assert tests.sensitization3()
# # add the new selection to the selected voxels group
# findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:Add').clicked()
# checkpoint meshable button set
# checkpoint meshable button set
# checkpoint microstructure page sensitized
# checkpoint OOF.PixelGroup.AddSelection
# # now the two created voxels groups have some voxels selection added
# assert tests.meshableCheck(1)
# assert tests.voxelGroupListCheck('pixelgroup (23 voxels, meshable)', 'pixelgroup<2> (51 voxels, meshable)')
# assert tests.selectedGroupCheck('pixelgroup<2> (51 voxels, meshable)')
# assert tests.sensitization4()
# # undo the last added voxels
# findWidget('OOF3D Graphics 1:Pane0:Pane1:Pane2:TBScroll:Voxel Selection:Undo').clicked()
# checkpoint microstructure page sensitized
# checkpoint pixel page updated
# checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Undo
| 84.597846
| 162
| 0.803527
| 6,101
| 47,121
| 6.128176
| 0.051467
| 0.092436
| 0.075104
| 0.077672
| 0.952659
| 0.946988
| 0.941345
| 0.931903
| 0.920376
| 0.91861
| 0
| 0.108764
| 0.063814
| 47,121
| 556
| 163
| 84.75
| 0.738768
| 0.816494
| 0
| 0.42268
| 0
| 0
| 0.421059
| 0.138848
| 0
| 0
| 0
| 0
| 0.010309
| 0
| null | null | 0
| 0.010309
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ab6356639b170b0c2ce0e54e646136d0179813ca
| 778
|
py
|
Python
|
lib_pypy/pypy_test/inprogress_test_binascii_extra.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
lib/lib_pypy/pypy_test/inprogress_test_binascii_extra.py
|
ojii/sandlib
|
f822eb308a86e413076c185724bd28a450c59187
|
[
"BSD-3-Clause"
] | null | null | null |
lib/lib_pypy/pypy_test/inprogress_test_binascii_extra.py
|
ojii/sandlib
|
f822eb308a86e413076c185724bd28a450c59187
|
[
"BSD-3-Clause"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
from __future__ import absolute_import
from lib_pypy import binascii
def test_uu():
    """Check uuencoding of short and maximum-length inputs, and the
    oversize-input error path; repeat the encodes to confirm the
    results are stable across calls."""
    # 7-byte input: single short uuencoded line.
    assert binascii.b2a_uu('1234567') == "',3(S-#4V-P \n"
    # 45-byte input: one full uuencoded line (the per-line maximum).
    assert binascii.b2a_uu('123456789012345678901234567890123456789012345') == 'M,3(S-#4V-S@Y,#$R,S0U-C<X.3 Q,C,T-38W.#DP,3(S-#4V-S@Y,#$R,S0U\n'
    # 46 bytes exceeds the per-line limit and must raise binascii.Error.
    try:
        assert binascii.b2a_uu('1234567890123456789012345678901234567890123456')
    except binascii.Error:
        pass
    else:
        assert False, "Expected binascii.Error on oversize input."
    # Same inputs again: encoding must be deterministic / stateless.
    assert binascii.b2a_uu('1234567') == "',3(S-#4V-P \n"
    assert binascii.b2a_uu('123456789012345678901234567890123456789012345') == 'M,3(S-#4V-S@Y,#$R,S0U-C<X.3 Q,C,T-38W.#DP,3(S-#4V-S@Y,#$R,S0U\n'
def test_base64():
    """Check base64 encoding: 'xxxx' encodes to 'eHh4eA==' plus a
    trailing newline."""
    encoded = binascii.b2a_base64('xxxx')
    assert encoded == 'eHh4eA==\n'
| 38.9
| 145
| 0.674807
| 120
| 778
| 4.258333
| 0.358333
| 0.164384
| 0.199609
| 0.18591
| 0.504892
| 0.504892
| 0.504892
| 0.504892
| 0.504892
| 0.504892
| 0
| 0.275188
| 0.145244
| 778
| 19
| 146
| 40.947368
| 0.493233
| 0
| 0
| 0.266667
| 0
| 0.133333
| 0.465894
| 0.33462
| 0
| 0
| 0
| 0
| 0.466667
| 1
| 0.133333
| true
| 0.066667
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
db5a7bb5eb29bdd8389bf3d60deed60e52eceafe
| 154
|
py
|
Python
|
neuro_pypes/dmri/__init__.py
|
Neurita/pypes
|
e88d27ebba842e8fa1f36b52ca12a0b9d5777e89
|
[
"Apache-2.0"
] | 14
|
2015-11-30T19:32:08.000Z
|
2021-11-16T05:35:20.000Z
|
neuro_pypes/dmri/__init__.py
|
Neurita/pypes
|
e88d27ebba842e8fa1f36b52ca12a0b9d5777e89
|
[
"Apache-2.0"
] | 42
|
2015-11-28T23:18:42.000Z
|
2021-02-23T01:45:02.000Z
|
neuro_pypes/dmri/__init__.py
|
Neurita/pypes
|
e88d27ebba842e8fa1f36b52ca12a0b9d5777e89
|
[
"Apache-2.0"
] | 9
|
2015-12-09T17:10:59.000Z
|
2022-01-03T17:26:40.000Z
|
# -*- coding: utf-8 -*-
from neuro_pypes.dmri.dti import attach_spm_fsl_dti_preprocessing
from neuro_pypes.dmri.camino import attach_camino_tractography
| 30.8
| 65
| 0.824675
| 23
| 154
| 5.173913
| 0.652174
| 0.151261
| 0.235294
| 0.302521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007143
| 0.090909
| 154
| 4
| 66
| 38.5
| 0.842857
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
db80163bcda70151823b7f6651cf74f3cc531806
| 139
|
py
|
Python
|
algorithms/1281. Subtract the Product and Sum of Digits of an Integer.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | 1
|
2020-12-02T13:54:30.000Z
|
2020-12-02T13:54:30.000Z
|
algorithms/1281. Subtract the Product and Sum of Digits of an Integer.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
algorithms/1281. Subtract the Product and Sum of Digits of an Integer.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
class Solution:
    """LeetCode 1281: Subtract the Product and Sum of Digits of an Integer."""

    def subtractProductAndSum(self, n: int) -> int:
        """Return (product of the decimal digits of n) - (sum of those digits).

        The original implementation called ``reduce``, ``mul`` and ``add``
        without importing them (``functools``/``operator``), raising a
        NameError at call time. This version computes both aggregates in a
        single pass over the digits with no imports needed.
        """
        product = 1
        total = 0
        for digit in map(int, str(n)):
            product *= digit
            total += digit
        return product - total
| 34.75
| 72
| 0.661871
| 21
| 139
| 4.380952
| 0.619048
| 0.130435
| 0.195652
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158273
| 139
| 3
| 73
| 46.333333
| 0.786325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
9166061a884ef72479f68d40b9d9037a62e4b77a
| 21,024
|
py
|
Python
|
reviewboard/hostingsvcs/tests/test_client.py
|
Xtuden-com/reviewboard
|
f7c45c8fb6588a462c2cbe0c33d83c7e1035d999
|
[
"MIT"
] | null | null | null |
reviewboard/hostingsvcs/tests/test_client.py
|
Xtuden-com/reviewboard
|
f7c45c8fb6588a462c2cbe0c33d83c7e1035d999
|
[
"MIT"
] | null | null | null |
reviewboard/hostingsvcs/tests/test_client.py
|
Xtuden-com/reviewboard
|
f7c45c8fb6588a462c2cbe0c33d83c7e1035d999
|
[
"MIT"
] | null | null | null |
"""Test cases for the hosting service client support."""
from __future__ import unicode_literals
from kgb import SpyAgency
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.hostingsvcs.service import (HostingService,
HostingServiceClient,
HostingServiceHTTPRequest,
HostingServiceHTTPResponse)
from reviewboard.testing.testcase import TestCase
class DummyHTTPRequest(HostingServiceHTTPRequest):
    """Fake HTTP request returning canned responses without network I/O."""

    #: Status codes returned per HTTP method (anything else gets 200).
    _STATUS_CODES = {
        'DELETE': 204,
        'POST': 201,
    }

    def open(self):
        """Return a canned HostingServiceHTTPResponse for this request."""
        method = self.method

        # DELETE and HEAD responses carry no payload.
        if method in ('DELETE', 'HEAD'):
            data = None
        else:
            data = b'test response'

        return HostingServiceHTTPResponse(
            request=self,
            url=self.url,
            data=data,
            headers={
                str('Test-header'): str('Value'),
            },
            status_code=self._STATUS_CODES.get(method, 200))
class HostingServiceHTTPRequestTests(TestCase):
    """Unit tests for HostingServiceHTTPRequest."""

    def test_init_with_query(self):
        """Testing HostingServiceHTTPRequest construction with query="""
        query = {
            'foo': 'bar',
            'a': 10,
            'list': ['a', 'b', 'c'],
        }
        request = HostingServiceHTTPRequest(
            url='http://example.com?z=1&z=2&baz=true',
            query=query)

        # Query arguments must be merged with the URL's and sorted by key.
        expected_url = (
            'http://example.com?a=10&baz=true&foo=bar&list=a&list=b&list=c'
            '&z=1&z=2'
        )
        self.assertEqual(request.url, expected_url)

    def test_add_basic_auth(self):
        """Testing HostingServiceHTTPRequest.add_basic_auth"""
        request = HostingServiceHTTPRequest('http://example.com')
        request.add_basic_auth(b'username', b'password')

        # base64(b'username:password') with the "Basic " scheme prefix.
        expected_headers = {
            'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
        }
        self.assertEqual(request.headers, expected_headers)

    def test_get_header(self):
        """Testing HostingServiceHTTPRequest.get_header"""
        request = HostingServiceHTTPRequest(
            'http://example.com',
            headers={
                'Authorization': 'Basic abc123',
                'Content-Length': '123',
            })

        # Header lookups must be case-insensitive.
        for name in ('Authorization', 'AUTHORIZATION', 'authorization'):
            self.assertEqual(request.get_header(name), 'Basic abc123')

        for name in ('Content-Length', 'CONTENT-LENGTH', 'content-length'):
            self.assertEqual(request.get_header(name), '123')
class HostingServiceHTTPResponseTests(TestCase):
    """Unit tests for HostingServiceHTTPResponse."""

    def test_json(self):
        """Testing HostingServiceHTTPResponse.json"""
        request = HostingServiceHTTPRequest('http://example.com')
        response = HostingServiceHTTPResponse(request=request,
                                              url='http://example.com',
                                              data=b'{"a": 1, "b": 2}',
                                              headers={},
                                              status_code=200)

        expected = {
            'a': 1,
            'b': 2,
        }
        self.assertEqual(response.json, expected)

    def test_json_with_non_json_response(self):
        """Testing HostingServiceHTTPResponse.json with non-JSON response"""
        request = HostingServiceHTTPRequest('http://example.com')
        response = HostingServiceHTTPResponse(request=request,
                                              url='http://example.com',
                                              data=b'{[(',
                                              headers={},
                                              status_code=200)

        # Malformed payloads must surface the JSON parse error.
        with self.assertRaises(ValueError):
            response.json

    def test_get_header(self):
        """Testing HostingServiceHTTPResponse.get_header"""
        request = HostingServiceHTTPRequest('http://example.com')
        response = HostingServiceHTTPResponse(
            request=request,
            url=request.url,
            status_code=200,
            data=b'',
            headers={
                str('Authorization'): str('Basic abc123'),
                str('Content-Length'): str('123'),
            })

        # Header lookups must be case-insensitive.
        for name in ('Authorization', 'AUTHORIZATION', 'authorization'):
            self.assertEqual(response.get_header(name), 'Basic abc123')

        for name in ('Content-Length', 'CONTENT-LENGTH', 'content-length'):
            self.assertEqual(response.get_header(name), '123')
class HostingServiceClientTests(SpyAgency, TestCase):
    """Unit tests for HostingServiceClient"""

    #: URL used by every request in this suite.
    _URL = 'http://example.com'

    #: Authorization header value for username/password credentials
    #: ("Basic " + base64(b'username:password')).
    _BASIC_AUTH = 'Basic dXNlcm5hbWU6cGFzc3dvcmQ='

    def setUp(self):
        super(HostingServiceClientTests, self).setUp()

        account = HostingServiceAccount()
        service = HostingService(account)

        self.client = HostingServiceClient(service)

        # Use the canned-response request class so no real HTTP happens.
        self.client.http_request_cls = DummyHTTPRequest

    def _check_response(self, response, data, status_code):
        """Assert the response contract shared by all http_* methods.

        This checks the response type, URL, payload, status code, headers,
        and the legacy ``data, headers = response`` tuple-unpacking behavior.
        """
        self.assertIsInstance(response, HostingServiceHTTPResponse)
        self.assertEqual(response.url, self._URL)
        self.assertEqual(response.data, data)
        self.assertEqual(response.status_code, status_code)
        self.assertIsInstance(response.headers, dict)
        self.assertEqual(
            response.headers,
            {
                str('Test-header'): str('Value'),
            })

        unpacked_data, unpacked_headers = response
        self.assertEqual(unpacked_data, response.data)
        self.assertEqual(unpacked_headers, response.headers)

    def _check_request(self, method, data, extra_headers=None):
        """Assert the state of the request build_http_request last built.

        ``extra_headers`` are merged into the expected Authorization/Foo
        headers (e.g. the Content-length header added when a body is sent).

        Note: the original tests asserted ``assertIsInstance(response.headers,
        dict)`` here — a copy/paste slip re-checking the response; the request
        headers are the subject and are checked instead.
        """
        request = self.client.build_http_request.last_call.return_value

        expected_headers = {
            'Authorization': self._BASIC_AUTH,
            'Foo': 'bar',
        }
        expected_headers.update(extra_headers or {})

        self.assertEqual(request.url, self._URL)
        self.assertEqual(request.method, method)
        self.assertEqual(request.data, data)
        self.assertIsInstance(request.headers, dict)
        self.assertEqual(request.headers, expected_headers)

    def test_http_delete(self):
        """Testing HostingServiceClient.http_delete"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_delete(
            url=self._URL,
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        # DELETE responses carry no payload and use HTTP 204.
        self._check_response(response, data=None, status_code=204)

        self.assertTrue(self.client.build_http_request.called_with(
            url=self._URL,
            body=None,
            headers={
                'Foo': 'bar',
            },
            credentials={
                'username': 'username',
                'password': 'password',
            }))

        self._check_request(method='DELETE', data=None)

    def test_http_get(self):
        """Testing HostingServiceClient.http_get"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_get(
            url=self._URL,
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_response(response, data=b'test response', status_code=200)

        self.assertTrue(self.client.build_http_request.called_with(
            url=self._URL,
            body=None,
            headers={
                'Foo': 'bar',
            },
            method='GET',
            username='username',
            password='password'))

        self._check_request(method='GET', data=None)

    def test_http_head(self):
        """Testing HostingServiceClient.http_head"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_head(
            url=self._URL,
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        # HEAD responses carry no payload.
        self._check_response(response, data=None, status_code=200)

        self.assertTrue(self.client.build_http_request.called_with(
            url=self._URL,
            body=None,
            headers={
                'Foo': 'bar',
            },
            method='HEAD',
            username='username',
            password='password'))

        self._check_request(method='HEAD', data=None)

    def test_http_post_with_body_unicode(self):
        """Testing HostingServiceClient.http_post with body as Unicode"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_post(
            url=self._URL,
            body='test body\U0001f60b',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_response(response, data=b'test response', status_code=201)

        # The Unicode body must be UTF-8-encoded, with Content-Length
        # computed from the encoded byte length.
        self.assertTrue(self.client.build_http_request.called_with(
            url=self._URL,
            body=b'test body\xf0\x9f\x98\x8b',
            headers={
                'Content-Length': '13',
                'Foo': 'bar',
            },
            method='POST',
            username='username',
            password='password'))

        self._check_request(method='POST',
                            data=b'test body\xf0\x9f\x98\x8b',
                            extra_headers={
                                'Content-length': '13',
                            })

    def test_http_post_with_body_bytes(self):
        """Testing HostingServiceClient.http_post with body as bytes"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_post(
            url=self._URL,
            body=b'test body\x01\x02\x03',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_response(response, data=b'test response', status_code=201)

        self.assertTrue(self.client.build_http_request.called_with(
            url=self._URL,
            body=b'test body\x01\x02\x03',
            headers={
                'Content-Length': '12',
                'Foo': 'bar',
            },
            method='POST',
            username='username',
            password='password'))

        self._check_request(method='POST',
                            data=b'test body\x01\x02\x03',
                            extra_headers={
                                'Content-length': '12',
                            })

    def test_http_put_with_body_unicode(self):
        """Testing HostingServiceClient.http_put with body as Unicode"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_put(
            url=self._URL,
            body='test body\U0001f60b',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_response(response, data=b'test response', status_code=200)

        self.assertTrue(self.client.build_http_request.called_with(
            url=self._URL,
            body=b'test body\xf0\x9f\x98\x8b',
            headers={
                'Content-Length': '13',
                'Foo': 'bar',
            },
            method='PUT',
            username='username',
            password='password'))

        self._check_request(method='PUT',
                            data=b'test body\xf0\x9f\x98\x8b',
                            extra_headers={
                                'Content-length': '13',
                            })

    def test_http_put_with_body_bytes(self):
        """Testing HostingServiceClient.http_put with body as bytes"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_put(
            url=self._URL,
            body=b'test body\x01\x02\x03',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_response(response, data=b'test response', status_code=200)

        self.assertTrue(self.client.build_http_request.called_with(
            url=self._URL,
            body=b'test body\x01\x02\x03',
            headers={
                'Content-Length': '12',
                'Foo': 'bar',
            },
            method='PUT',
            username='username',
            password='password'))

        self._check_request(method='PUT',
                            data=b'test body\x01\x02\x03',
                            extra_headers={
                                'Content-length': '12',
                            })

    def test_http_request(self):
        """Testing HostingServiceClient.http_request"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_request(
            url=self._URL,
            body=b'test',
            headers={
                'Foo': 'bar',
            },
            method='BAZ',
            username='username',
            password='password')

        # Unknown methods fall through to the default canned response.
        self._check_response(response, data=b'test response', status_code=200)

        self.assertTrue(self.client.build_http_request.called_with(
            url=self._URL,
            body=b'test',
            headers={
                'Foo': 'bar',
            },
            method='BAZ',
            username='username',
            password='password'))

        self._check_request(method='BAZ', data=b'test')

    def test_build_http_request(self):
        """Testing HostingServiceClient.build_http_request"""
        request = self.client.build_http_request(
            url=self._URL,
            body=b'test',
            method='POST',
            credentials={},
            headers={
                'Foo': 'bar',
            })

        self.assertEqual(request.url, self._URL)
        self.assertEqual(request.data, b'test')
        self.assertEqual(request.method, 'POST')

        # With empty credentials, no Authorization header is added.
        self.assertEqual(
            request.headers,
            {
                'Foo': 'bar',
            })

    def test_build_http_request_with_basic_auth(self):
        """Testing HostingServiceClient.build_http_request with username and
        password
        """
        request = self.client.build_http_request(
            url=self._URL,
            body=b'test',
            method='POST',
            headers={
                'Foo': 'bar',
            },
            credentials={
                'username': 'username',
                'password': 'password',
            })

        self.assertEqual(request.url, self._URL)
        self.assertEqual(request.data, b'test')
        self.assertEqual(request.method, 'POST')
        self.assertEqual(
            request.headers,
            {
                'Authorization': self._BASIC_AUTH,
                'Foo': 'bar',
            })
| 34.807947
| 78
| 0.557934
| 1,885
| 21,024
| 6.117772
| 0.065782
| 0.127471
| 0.054631
| 0.058966
| 0.836975
| 0.825702
| 0.813996
| 0.795179
| 0.770638
| 0.765956
| 0
| 0.015173
| 0.322869
| 21,024
| 603
| 79
| 34.865672
| 0.794886
| 0.047041
| 0
| 0.727455
| 0
| 0.002004
| 0.146357
| 0.01205
| 0
| 0
| 0
| 0
| 0.272545
| 1
| 0.036072
| false
| 0.036072
| 0.01002
| 0
| 0.056112
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91a87bc90a279e442b313e5f04df1f8b8bca4592
| 170,124
|
py
|
Python
|
stubs/System/CodeDom/Compiler.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs/System/CodeDom/Compiler.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs/System/CodeDom/Compiler.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module System.CodeDom.Compiler calls itself Compiler
# from System, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class ICodeGenerator:
    """Defines an interface for generating code.

    Generated IronPython stub for System.CodeDom.Compiler.ICodeGenerator;
    every method body is a stub (``pass``).
    """

    def CreateEscapedIdentifier(self, value):
        """CreateEscapedIdentifier(self: ICodeGenerator, value: str) -> str

        Creates an escaped identifier for the specified value.

        value: The string to create an escaped identifier for.
        Returns: The escaped identifier for the value.
        """
        pass

    def CreateValidIdentifier(self, value):
        """CreateValidIdentifier(self: ICodeGenerator, value: str) -> str

        Creates a valid identifier for the specified value.

        value: The string to generate a valid identifier for.
        Returns: A valid identifier for the specified value.
        """
        pass

    def GenerateCodeFromCompileUnit(self, e, w, o):
        """GenerateCodeFromCompileUnit(self: ICodeGenerator, e: CodeCompileUnit, w: TextWriter, o: CodeGeneratorOptions)

        Generates code for the specified Code Document Object Model (CodeDOM)
        compilation unit and outputs it to the specified text writer using
        the specified options.

        e: A System.CodeDom.CodeCompileUnit to generate code for.
        w: The System.IO.TextWriter to output code to.
        o: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the
           options to use for generating code.
        """
        pass

    def GenerateCodeFromExpression(self, e, w, o):
        """GenerateCodeFromExpression(self: ICodeGenerator, e: CodeExpression, w: TextWriter, o: CodeGeneratorOptions)

        Generates code for the specified Code Document Object Model (CodeDOM)
        expression and outputs it to the specified text writer.

        e: A System.CodeDom.CodeExpression that indicates the expression to
           generate code for.
        w: The System.IO.TextWriter to output code to.
        o: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the
           options to use for generating code.
        """
        pass

    def GenerateCodeFromNamespace(self, e, w, o):
        """GenerateCodeFromNamespace(self: ICodeGenerator, e: CodeNamespace, w: TextWriter, o: CodeGeneratorOptions)

        Generates code for the specified Code Document Object Model (CodeDOM)
        namespace and outputs it to the specified text writer using the
        specified options.

        e: A System.CodeDom.CodeNamespace that indicates the namespace to
           generate code for.
        w: The System.IO.TextWriter to output code to.
        o: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the
           options to use for generating code.
        """
        pass

    def GenerateCodeFromStatement(self, e, w, o):
        """GenerateCodeFromStatement(self: ICodeGenerator, e: CodeStatement, w: TextWriter, o: CodeGeneratorOptions)

        Generates code for the specified Code Document Object Model (CodeDOM)
        statement and outputs it to the specified text writer using the
        specified options.

        e: A System.CodeDom.CodeStatement containing the CodeDOM elements to
           translate.
        w: The System.IO.TextWriter to output code to.
        o: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the
           options to use for generating code.
        """
        pass

    def GenerateCodeFromType(self, e, w, o):
        """GenerateCodeFromType(self: ICodeGenerator, e: CodeTypeDeclaration, w: TextWriter, o: CodeGeneratorOptions)

        Generates code for the specified Code Document Object Model (CodeDOM)
        type declaration and outputs it to the specified text writer using
        the specified options.

        e: A System.CodeDom.CodeTypeDeclaration that indicates the type to
           generate code for.
        w: The System.IO.TextWriter to output code to.
        o: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the
           options to use for generating code.
        """
        pass

    def GetTypeOutput(self, type):
        """GetTypeOutput(self: ICodeGenerator, type: CodeTypeReference) -> str

        Gets the type indicated by the specified
        System.CodeDom.CodeTypeReference.

        type: A System.CodeDom.CodeTypeReference that indicates the type to
              return.
        Returns: A text representation of the specified type for the language
                 this code generator is designed to generate code in. For
                 example, in Visual Basic, passing in type System.Int32 will
                 return "Integer".
        """
        pass

    def IsValidIdentifier(self, value):
        """IsValidIdentifier(self: ICodeGenerator, value: str) -> bool

        Gets a value that indicates whether the specified value is a valid
        identifier for the current language.

        value: The value to test for being a valid identifier.
        Returns: true if the value parameter is a valid identifier;
                 otherwise, false.
        """
        pass

    def Supports(self, supports):
        """Supports(self: ICodeGenerator, supports: GeneratorSupport) -> bool

        Gets a value indicating whether the generator provides support for
        the language features represented by the specified
        System.CodeDom.Compiler.GeneratorSupport object.

        supports: The capabilities to test the generator for.
        Returns: true if the specified capabilities are supported;
                 otherwise, false.
        """
        pass

    def ValidateIdentifier(self, value):
        """ValidateIdentifier(self: ICodeGenerator, value: str)

        Throws an exception if the specified value is not a valid identifier.

        value: The identifier to validate.
        """
        pass

    def __init__(self, *args):  # cannot find CLR method
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class CodeGenerator(object, ICodeGenerator):
""" Provides an example implementation of the System.CodeDom.Compiler.ICodeGenerator interface. This class is abstract. """
def ContinueOnNewLine(self, *args): #cannot find CLR method
"""
ContinueOnNewLine(self: CodeGenerator, st: str)
Generates a line-continuation character and outputs the specified string on a
new line.
st: The string to write on the new line.
"""
pass
def CreateEscapedIdentifier(self, *args): #cannot find CLR method
"""
CreateEscapedIdentifier(self: CodeGenerator, value: str) -> str
Creates an escaped identifier for the specified value.
value: The string to create an escaped identifier for.
Returns: The escaped identifier for the value.
"""
pass
def CreateValidIdentifier(self, *args): #cannot find CLR method
"""
CreateValidIdentifier(self: CodeGenerator, value: str) -> str
Creates a valid identifier for the specified value.
value: A string to create a valid identifier for.
Returns: A valid identifier for the value.
"""
pass
def GenerateArgumentReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateArgumentReferenceExpression(self: CodeGenerator, e: CodeArgumentReferenceExpression)
Generates code for the specified argument reference expression.
e: A System.CodeDom.CodeArgumentReferenceExpression that indicates the expression
to generate code for.
"""
pass
def GenerateArrayCreateExpression(self, *args): #cannot find CLR method
"""
GenerateArrayCreateExpression(self: CodeGenerator, e: CodeArrayCreateExpression)
Generates code for the specified array creation expression.
e: A System.CodeDom.CodeArrayCreateExpression that indicates the expression to
generate code for.
"""
pass
def GenerateArrayIndexerExpression(self, *args): #cannot find CLR method
"""
GenerateArrayIndexerExpression(self: CodeGenerator, e: CodeArrayIndexerExpression)
Generates code for the specified array indexer expression.
e: A System.CodeDom.CodeArrayIndexerExpression that indicates the expression to
generate code for.
"""
pass
def GenerateAssignStatement(self, *args): #cannot find CLR method
"""
GenerateAssignStatement(self: CodeGenerator, e: CodeAssignStatement)
Generates code for the specified assignment statement.
e: A System.CodeDom.CodeAssignStatement that indicates the statement to generate
code for.
"""
pass
def GenerateAttachEventStatement(self, *args): #cannot find CLR method
"""
GenerateAttachEventStatement(self: CodeGenerator, e: CodeAttachEventStatement)
Generates code for the specified attach event statement.
e: A System.CodeDom.CodeAttachEventStatement that indicates the statement to
generate code for.
"""
pass
def GenerateAttributeDeclarationsEnd(self, *args): #cannot find CLR method
"""
GenerateAttributeDeclarationsEnd(self: CodeGenerator, attributes: CodeAttributeDeclarationCollection)
Generates code for the specified attribute block end.
attributes: A System.CodeDom.CodeAttributeDeclarationCollection that indicates the end of
the attribute block to generate code for.
"""
pass
def GenerateAttributeDeclarationsStart(self, *args): #cannot find CLR method
"""
GenerateAttributeDeclarationsStart(self: CodeGenerator, attributes: CodeAttributeDeclarationCollection)
Generates code for the specified attribute block start.
attributes: A System.CodeDom.CodeAttributeDeclarationCollection that indicates the start of
the attribute block to generate code for.
"""
pass
def GenerateBaseReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateBaseReferenceExpression(self: CodeGenerator, e: CodeBaseReferenceExpression)
Generates code for the specified base reference expression.
e: A System.CodeDom.CodeBaseReferenceExpression that indicates the expression to
generate code for.
"""
pass
def GenerateBinaryOperatorExpression(self, *args): #cannot find CLR method
"""
GenerateBinaryOperatorExpression(self: CodeGenerator, e: CodeBinaryOperatorExpression)
Generates code for the specified binary operator expression.
e: A System.CodeDom.CodeBinaryOperatorExpression that indicates the expression to
generate code for.
"""
pass
def GenerateCastExpression(self, *args): #cannot find CLR method
"""
GenerateCastExpression(self: CodeGenerator, e: CodeCastExpression)
Generates code for the specified cast expression.
e: A System.CodeDom.CodeCastExpression that indicates the expression to generate
code for.
"""
pass
def GenerateCodeFromMember(self, member, writer, options):
"""
GenerateCodeFromMember(self: CodeGenerator, member: CodeTypeMember, writer: TextWriter, options: CodeGeneratorOptions)
Generates code for the specified class member using the specified text writer
and code generator options.
member: The class member to generate code for.
writer: The text writer to output code to.
options: The options to use when generating the code.
"""
pass
def GenerateComment(self, *args): #cannot find CLR method
"""
GenerateComment(self: CodeGenerator, e: CodeComment)
Generates code for the specified comment.
e: A System.CodeDom.CodeComment to generate code for.
"""
pass
def GenerateCommentStatement(self, *args): #cannot find CLR method
"""
GenerateCommentStatement(self: CodeGenerator, e: CodeCommentStatement)
Generates code for the specified comment statement.
e: The statement to generate code for.
"""
pass
def GenerateCommentStatements(self, *args): #cannot find CLR method
"""
GenerateCommentStatements(self: CodeGenerator, e: CodeCommentStatementCollection)
Generates code for the specified comment statements.
e: The expression to generate code for.
"""
pass
def GenerateCompileUnit(self, *args): #cannot find CLR method
"""
GenerateCompileUnit(self: CodeGenerator, e: CodeCompileUnit)
Generates code for the specified compile unit.
e: The compile unit to generate code for.
"""
pass
def GenerateCompileUnitEnd(self, *args): #cannot find CLR method
"""
GenerateCompileUnitEnd(self: CodeGenerator, e: CodeCompileUnit)
Generates code for the end of a compile unit.
e: The compile unit to generate code for.
"""
pass
def GenerateCompileUnitStart(self, *args): #cannot find CLR method
"""
GenerateCompileUnitStart(self: CodeGenerator, e: CodeCompileUnit)
Generates code for the start of a compile unit.
e: The compile unit to generate code for.
"""
pass
def GenerateConditionStatement(self, *args): #cannot find CLR method
"""
GenerateConditionStatement(self: CodeGenerator, e: CodeConditionStatement)
Generates code for the specified conditional statement.
e: The statement to generate code for.
"""
pass
def GenerateConstructor(self, *args): #cannot find CLR method
"""
GenerateConstructor(self: CodeGenerator, e: CodeConstructor, c: CodeTypeDeclaration)
Generates code for the specified constructor.
e: The constructor to generate code for.
c: The type of the object that this constructor constructs.
"""
pass
def GenerateDecimalValue(self, *args): #cannot find CLR method
"""
GenerateDecimalValue(self: CodeGenerator, d: Decimal)
Generates code for the specified decimal value.
d: The decimal value to generate code for.
"""
pass
def GenerateDefaultValueExpression(self, *args): #cannot find CLR method
"""
GenerateDefaultValueExpression(self: CodeGenerator, e: CodeDefaultValueExpression)
Generates code for the specified reference to a default value.
e: The reference to generate code for.
"""
pass
def GenerateDelegateCreateExpression(self, *args): #cannot find CLR method
"""
GenerateDelegateCreateExpression(self: CodeGenerator, e: CodeDelegateCreateExpression)
Generates code for the specified delegate creation expression.
e: The expression to generate code for.
"""
pass
def GenerateDelegateInvokeExpression(self, *args): #cannot find CLR method
"""
GenerateDelegateInvokeExpression(self: CodeGenerator, e: CodeDelegateInvokeExpression)
Generates code for the specified delegate invoke expression.
e: The expression to generate code for.
"""
pass
def GenerateDirectionExpression(self, *args): #cannot find CLR method
"""
GenerateDirectionExpression(self: CodeGenerator, e: CodeDirectionExpression)
Generates code for the specified direction expression.
e: The expression to generate code for.
"""
pass
def GenerateDirectives(self, *args): #cannot find CLR method
"""
GenerateDirectives(self: CodeGenerator, directives: CodeDirectiveCollection)
Generates code for the specified code directives.
directives: The code directives to generate code for.
"""
pass
def GenerateDoubleValue(self, *args): #cannot find CLR method
"""
GenerateDoubleValue(self: CodeGenerator, d: float)
Generates code for a double-precision floating point number.
d: The value to generate code for.
"""
pass
def GenerateEntryPointMethod(self, *args): #cannot find CLR method
"""
GenerateEntryPointMethod(self: CodeGenerator, e: CodeEntryPointMethod, c: CodeTypeDeclaration)
Generates code for the specified entry point method.
e: The entry point for the code.
c: The code that declares the type.
"""
pass
def GenerateEvent(self, *args): #cannot find CLR method
"""
GenerateEvent(self: CodeGenerator, e: CodeMemberEvent, c: CodeTypeDeclaration)
Generates code for the specified event.
e: The member event to generate code for.
c: The type of the object that this event occurs on.
"""
pass
def GenerateEventReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateEventReferenceExpression(self: CodeGenerator, e: CodeEventReferenceExpression)
Generates code for the specified event reference expression.
e: The expression to generate code for.
"""
pass
def GenerateExpression(self, *args): #cannot find CLR method
"""
GenerateExpression(self: CodeGenerator, e: CodeExpression)
Generates code for the specified code expression.
e: The code expression to generate code for.
"""
pass
def GenerateExpressionStatement(self, *args): #cannot find CLR method
"""
GenerateExpressionStatement(self: CodeGenerator, e: CodeExpressionStatement)
Generates code for the specified expression statement.
e: The statement to generate code for.
"""
pass
def GenerateField(self, *args): #cannot find CLR method
"""
GenerateField(self: CodeGenerator, e: CodeMemberField)
Generates code for the specified member field.
e: The field to generate code for.
"""
pass
def GenerateFieldReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateFieldReferenceExpression(self: CodeGenerator, e: CodeFieldReferenceExpression)
Generates code for the specified field reference expression.
e: The expression to generate code for.
"""
pass
def GenerateGotoStatement(self, *args): #cannot find CLR method
    """
    GenerateGotoStatement(self: CodeGenerator, e: CodeGotoStatement)
    Generates code for the specified goto statement.
    e: The statement to generate code for.
    """
    pass
def GenerateIndexerExpression(self, *args): #cannot find CLR method
"""
GenerateIndexerExpression(self: CodeGenerator, e: CodeIndexerExpression)
Generates code for the specified indexer expression.
e: The expression to generate code for.
"""
pass
def GenerateIterationStatement(self, *args): #cannot find CLR method
"""
GenerateIterationStatement(self: CodeGenerator, e: CodeIterationStatement)
Generates code for the specified iteration statement.
e: The statement to generate code for.
"""
pass
def GenerateLabeledStatement(self, *args): #cannot find CLR method
"""
GenerateLabeledStatement(self: CodeGenerator, e: CodeLabeledStatement)
Generates code for the specified labeled statement.
e: The statement to generate code for.
"""
pass
def GenerateLinePragmaEnd(self, *args): #cannot find CLR method
"""
GenerateLinePragmaEnd(self: CodeGenerator, e: CodeLinePragma)
Generates code for the specified line pragma end.
e: The end of the line pragma to generate code for.
"""
pass
def GenerateLinePragmaStart(self, *args): #cannot find CLR method
"""
GenerateLinePragmaStart(self: CodeGenerator, e: CodeLinePragma)
Generates code for the specified line pragma start.
e: The start of the line pragma to generate code for.
"""
pass
def GenerateMethod(self, *args): #cannot find CLR method
"""
GenerateMethod(self: CodeGenerator, e: CodeMemberMethod, c: CodeTypeDeclaration)
Generates code for the specified method.
e: The member method to generate code for.
c: The type of the object that this method occurs on.
"""
pass
def GenerateMethodInvokeExpression(self, *args): #cannot find CLR method
"""
GenerateMethodInvokeExpression(self: CodeGenerator, e: CodeMethodInvokeExpression)
Generates code for the specified method invoke expression.
e: The expression to generate code for.
"""
pass
def GenerateMethodReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateMethodReferenceExpression(self: CodeGenerator, e: CodeMethodReferenceExpression)
Generates code for the specified method reference expression.
e: The expression to generate code for.
"""
pass
def GenerateMethodReturnStatement(self, *args): #cannot find CLR method
"""
GenerateMethodReturnStatement(self: CodeGenerator, e: CodeMethodReturnStatement)
Generates code for the specified method return statement.
e: The statement to generate code for.
"""
pass
def GenerateNamespace(self, *args): #cannot find CLR method
"""
GenerateNamespace(self: CodeGenerator, e: CodeNamespace)
Generates code for the specified namespace.
e: The namespace to generate code for.
"""
pass
def GenerateNamespaceEnd(self, *args): #cannot find CLR method
"""
GenerateNamespaceEnd(self: CodeGenerator, e: CodeNamespace)
Generates code for the end of a namespace.
e: The namespace to generate code for.
"""
pass
def GenerateNamespaceImport(self, *args): #cannot find CLR method
"""
GenerateNamespaceImport(self: CodeGenerator, e: CodeNamespaceImport)
Generates code for the specified namespace import.
e: The namespace import to generate code for.
"""
pass
def GenerateNamespaceImports(self, *args): #cannot find CLR method
    """
    GenerateNamespaceImports(self: CodeGenerator, e: CodeNamespace)
    Generates code for the namespace imports of the specified namespace.
    e: The namespace whose imports to generate code for.
    """
    pass
def GenerateNamespaces(self, *args): #cannot find CLR method
"""
GenerateNamespaces(self: CodeGenerator, e: CodeCompileUnit)
Generates code for the namespaces in the specified compile unit.
e: The compile unit to generate namespaces for.
"""
pass
def GenerateNamespaceStart(self, *args): #cannot find CLR method
"""
GenerateNamespaceStart(self: CodeGenerator, e: CodeNamespace)
Generates code for the start of a namespace.
e: The namespace to generate code for.
"""
pass
def GenerateObjectCreateExpression(self, *args): #cannot find CLR method
"""
GenerateObjectCreateExpression(self: CodeGenerator, e: CodeObjectCreateExpression)
Generates code for the specified object creation expression.
e: The expression to generate code for.
"""
pass
def GenerateParameterDeclarationExpression(self, *args): #cannot find CLR method
"""
GenerateParameterDeclarationExpression(self: CodeGenerator, e: CodeParameterDeclarationExpression)
Generates code for the specified parameter declaration expression.
e: The expression to generate code for.
"""
pass
def GeneratePrimitiveExpression(self, *args): #cannot find CLR method
"""
GeneratePrimitiveExpression(self: CodeGenerator, e: CodePrimitiveExpression)
Generates code for the specified primitive expression.
e: The expression to generate code for.
"""
pass
def GenerateProperty(self, *args): #cannot find CLR method
"""
GenerateProperty(self: CodeGenerator, e: CodeMemberProperty, c: CodeTypeDeclaration)
Generates code for the specified property.
e: The property to generate code for.
c: The type of the object that this property occurs on.
"""
pass
def GeneratePropertyReferenceExpression(self, *args): #cannot find CLR method
"""
GeneratePropertyReferenceExpression(self: CodeGenerator, e: CodePropertyReferenceExpression)
Generates code for the specified property reference expression.
e: The expression to generate code for.
"""
pass
def GeneratePropertySetValueReferenceExpression(self, *args): #cannot find CLR method
"""
GeneratePropertySetValueReferenceExpression(self: CodeGenerator, e: CodePropertySetValueReferenceExpression)
Generates code for the specified property set value reference expression.
e: The expression to generate code for.
"""
pass
def GenerateRemoveEventStatement(self, *args): #cannot find CLR method
"""
GenerateRemoveEventStatement(self: CodeGenerator, e: CodeRemoveEventStatement)
Generates code for the specified remove event statement.
e: The statement to generate code for.
"""
pass
def GenerateSingleFloatValue(self, *args): #cannot find CLR method
"""
GenerateSingleFloatValue(self: CodeGenerator, s: Single)
Generates code for a single-precision floating point number.
s: The value to generate code for.
"""
pass
def GenerateSnippetCompileUnit(self, *args): #cannot find CLR method
"""
GenerateSnippetCompileUnit(self: CodeGenerator, e: CodeSnippetCompileUnit)
Outputs the code of the specified literal code fragment compile unit.
e: The literal code fragment compile unit to generate code for.
"""
pass
def GenerateSnippetExpression(self, *args): #cannot find CLR method
"""
GenerateSnippetExpression(self: CodeGenerator, e: CodeSnippetExpression)
Outputs the code of the specified literal code fragment expression.
e: The expression to generate code for.
"""
pass
def GenerateSnippetMember(self, *args): #cannot find CLR method
"""
GenerateSnippetMember(self: CodeGenerator, e: CodeSnippetTypeMember)
Outputs the code of the specified literal code fragment class member.
e: The member to generate code for.
"""
pass
def GenerateSnippetStatement(self, *args): #cannot find CLR method
"""
GenerateSnippetStatement(self: CodeGenerator, e: CodeSnippetStatement)
Outputs the code of the specified literal code fragment statement.
e: The statement to generate code for.
"""
pass
def GenerateStatement(self, *args): #cannot find CLR method
"""
GenerateStatement(self: CodeGenerator, e: CodeStatement)
Generates code for the specified statement.
e: The statement to generate code for.
"""
pass
def GenerateStatements(self, *args): #cannot find CLR method
"""
GenerateStatements(self: CodeGenerator, stms: CodeStatementCollection)
Generates code for the specified statement collection.
stms: The statements to generate code for.
"""
pass
def GenerateThisReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateThisReferenceExpression(self: CodeGenerator, e: CodeThisReferenceExpression)
Generates code for the specified this reference expression.
e: The expression to generate code for.
"""
pass
def GenerateThrowExceptionStatement(self, *args): #cannot find CLR method
"""
GenerateThrowExceptionStatement(self: CodeGenerator, e: CodeThrowExceptionStatement)
Generates code for the specified throw exception statement.
e: The statement to generate code for.
"""
pass
def GenerateTryCatchFinallyStatement(self, *args): #cannot find CLR method
"""
GenerateTryCatchFinallyStatement(self: CodeGenerator, e: CodeTryCatchFinallyStatement)
Generates code for the specified try...catch...finally statement.
e: The statement to generate code for.
"""
pass
def GenerateTypeConstructor(self, *args): #cannot find CLR method
"""
GenerateTypeConstructor(self: CodeGenerator, e: CodeTypeConstructor)
Generates code for the specified class constructor.
e: The class constructor to generate code for.
"""
pass
def GenerateTypeEnd(self, *args): #cannot find CLR method
"""
GenerateTypeEnd(self: CodeGenerator, e: CodeTypeDeclaration)
Generates code for the specified end of the class.
e: The end of the class to generate code for.
"""
pass
def GenerateTypeOfExpression(self, *args): #cannot find CLR method
    """
    GenerateTypeOfExpression(self: CodeGenerator, e: CodeTypeOfExpression)
    Generates code for the specified typeof expression.
    e: The expression to generate code for.
    """
    pass
def GenerateTypeReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateTypeReferenceExpression(self: CodeGenerator, e: CodeTypeReferenceExpression)
Generates code for the specified type reference expression.
e: The expression to generate code for.
"""
pass
def GenerateTypes(self, *args): #cannot find CLR method
"""
GenerateTypes(self: CodeGenerator, e: CodeNamespace)
Generates code for the specified namespace and the classes it contains.
e: The namespace to generate classes for.
"""
pass
def GenerateTypeStart(self, *args): #cannot find CLR method
"""
GenerateTypeStart(self: CodeGenerator, e: CodeTypeDeclaration)
Generates code for the specified start of the class.
e: The start of the class to generate code for.
"""
pass
def GenerateVariableDeclarationStatement(self, *args): #cannot find CLR method
"""
GenerateVariableDeclarationStatement(self: CodeGenerator, e: CodeVariableDeclarationStatement)
Generates code for the specified variable declaration statement.
e: The statement to generate code for.
"""
pass
def GenerateVariableReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateVariableReferenceExpression(self: CodeGenerator, e: CodeVariableReferenceExpression)
Generates code for the specified variable reference expression.
e: The expression to generate code for.
"""
pass
def GetTypeOutput(self, *args): # CLR-only method; stub has no Python-callable body
    """Get the name of the specified data type.

    GetTypeOutput(self: CodeGenerator, value: CodeTypeReference) -> str

    value: The type whose name will be returned.
    Returns: The name of the data type reference.
    """
    pass
def IsValidIdentifier(self, *args): # CLR-only method; stub has no Python-callable body
    """Report whether the specified value is a valid identifier.

    IsValidIdentifier(self: CodeGenerator, value: str) -> bool

    value: The value to test for conflicts with valid identifiers.
    Returns: true if the value is a valid identifier; otherwise, false.
    """
    pass
@staticmethod
def IsValidLanguageIndependentIdentifier(value):
"""
IsValidLanguageIndependentIdentifier(value: str) -> bool
Gets a value indicating whether the specified string is a valid identifier.
value: The string to test for validity.
Returns: true if the specified string is a valid identifier; otherwise, false.
"""
pass
def OutputAttributeArgument(self, *args): #cannot find CLR method
"""
OutputAttributeArgument(self: CodeGenerator, arg: CodeAttributeArgument)
Outputs an argument in an attribute block.
arg: The attribute argument to generate code for.
"""
pass
def OutputAttributeDeclarations(self, *args): #cannot find CLR method
"""
OutputAttributeDeclarations(self: CodeGenerator, attributes: CodeAttributeDeclarationCollection)
Generates code for the specified attribute declaration collection.
attributes: The attributes to generate code for.
"""
pass
def OutputDirection(self, *args): #cannot find CLR method
"""
OutputDirection(self: CodeGenerator, dir: FieldDirection)
Generates code for the specified System.CodeDom.FieldDirection.
dir: One of the enumeration values that indicates the attribute of the field.
"""
pass
def OutputExpressionList(self, *args): #cannot find CLR method
"""
OutputExpressionList(self: CodeGenerator, expressions: CodeExpressionCollection, newlineBetweenItems: bool)
Generates code for the specified expression list.
expressions: The expressions to generate code for.
newlineBetweenItems: true to insert a new line after each item; otherwise, false.
OutputExpressionList(self: CodeGenerator, expressions: CodeExpressionCollection)
Generates code for the specified expression list.
expressions: The expressions to generate code for.
"""
pass
def OutputFieldScopeModifier(self, *args): #cannot find CLR method
"""
OutputFieldScopeModifier(self: CodeGenerator, attributes: MemberAttributes)
Outputs a field scope modifier that corresponds to the specified attributes.
attributes: One of the enumeration values that specifies the attributes.
"""
pass
def OutputIdentifier(self, *args): #cannot find CLR method
"""
OutputIdentifier(self: CodeGenerator, ident: str)
Outputs the specified identifier.
ident: The identifier to output.
"""
pass
def OutputMemberAccessModifier(self, *args): #cannot find CLR method
"""
OutputMemberAccessModifier(self: CodeGenerator, attributes: MemberAttributes)
Generates code for the specified member access modifier.
attributes: One of the enumeration values that indicates the member access modifier to
generate code for.
"""
pass
def OutputMemberScopeModifier(self, *args): #cannot find CLR method
"""
OutputMemberScopeModifier(self: CodeGenerator, attributes: MemberAttributes)
Generates code for the specified member scope modifier.
attributes: One of the enumeration values that indicates the member scope modifier to
generate code for.
"""
pass
def OutputOperator(self, *args): #cannot find CLR method
"""
OutputOperator(self: CodeGenerator, op: CodeBinaryOperatorType)
Generates code for the specified operator.
op: The operator to generate code for.
"""
pass
def OutputParameters(self, *args): #cannot find CLR method
"""
OutputParameters(self: CodeGenerator, parameters: CodeParameterDeclarationExpressionCollection)
Generates code for the specified parameters.
parameters: The parameter declaration expressions to generate code for.
"""
pass
def OutputType(self, *args): #cannot find CLR method
"""
OutputType(self: CodeGenerator, typeRef: CodeTypeReference)
Generates code for the specified type.
typeRef: The type to generate code for.
"""
pass
def OutputTypeAttributes(self, *args): #cannot find CLR method
"""
OutputTypeAttributes(self: CodeGenerator, attributes: TypeAttributes, isStruct: bool, isEnum: bool)
Generates code for the specified type attributes.
attributes: One of the enumeration values that indicates the type attributes to generate
code for.
isStruct: true if the type is a struct; otherwise, false.
isEnum: true if the type is an enum; otherwise, false.
"""
pass
def OutputTypeNamePair(self, *args): #cannot find CLR method
"""
OutputTypeNamePair(self: CodeGenerator, typeRef: CodeTypeReference, name: str)
Generates code for the specified object type and name pair.
typeRef: The type.
name: The name for the object.
"""
pass
def QuoteSnippetString(self, *args): #cannot find CLR method
"""
QuoteSnippetString(self: CodeGenerator, value: str) -> str
Converts the specified string by formatting it with escape codes.
value: The string to convert.
Returns: The converted string.
"""
pass
def Supports(self, *args): # CLR-only method; stub has no Python-callable body
    """Report whether the specified code generation support is provided.

    Supports(self: CodeGenerator, support: GeneratorSupport) -> bool

    support: The type of code generation support to test for.
    Returns: true if the specified code generation support is provided;
    otherwise, false.
    """
    pass
def ValidateIdentifier(self, *args): # CLR-only method; stub has no Python-callable body
    """Throw an exception if the specified string is not a valid identifier.

    ValidateIdentifier(self: CodeGenerator, value: str)

    value: The identifier to test for validity as an identifier.
    """
    pass
@staticmethod
def ValidateIdentifiers(e):
"""
ValidateIdentifiers(e: CodeObject)
Attempts to validate each identifier field contained in the specified
System.CodeDom.CodeObject or System.CodeDom tree.
e: An object to test for invalid identifiers.
"""
pass
def __init__(self, *args): #cannot find CLR method
    # NOTE(review): the stub generator repeated the docstring text three times;
    # collapsed to a single occurrence.
    """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
    pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
# NOTE: stub property descriptors emitted by the IronPython stub generator.
# The lambdas are placeholders only (getter returns a fresh object(), setter and
# deleter are no-ops); the real accessors live in the CLR CodeGenerator type.
CurrentClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the code type declaration for the current class.
"""
CurrentMember = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the current member of the class.
"""
CurrentMemberName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the current member name.
"""
CurrentTypeName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the current class name.
"""
Indent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the amount of spaces to indent each indentation level.
"""
IsCurrentClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is a class.
"""
IsCurrentDelegate = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is a delegate.
"""
IsCurrentEnum = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is an enumeration.
"""
IsCurrentInterface = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is an interface.
"""
IsCurrentStruct = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is a value type or struct.
"""
NullToken = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the token that represents null.
"""
Options = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the options to be used by the code generator.
"""
Output = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the text writer to use for output.
"""
class ICodeCompiler:
    """ Defines an interface for invoking compilation of source code or a CodeDOM tree using a specific compiler. """
    def CompileAssemblyFromDom(self, options, compilationUnit):
        """
        CompileAssemblyFromDom(self: ICodeCompiler, options: CompilerParameters, compilationUnit: CodeCompileUnit) -> CompilerResults
        Compiles an assembly from the System.CodeDom tree contained in the specified
        System.CodeDom.CodeCompileUnit, using the specified compiler settings.
        options: A System.CodeDom.Compiler.CompilerParameters object that indicates the settings
        for compilation.
        compilationUnit: A System.CodeDom.CodeCompileUnit that indicates the code to compile.
        Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
        compilation.
        """
        pass
    def CompileAssemblyFromDomBatch(self, options, compilationUnits):
        """
        CompileAssemblyFromDomBatch(self: ICodeCompiler, options: CompilerParameters, compilationUnits: Array[CodeCompileUnit]) -> CompilerResults
        Compiles an assembly based on the System.CodeDom trees contained in the
        specified array of System.CodeDom.CodeCompileUnit objects, using the specified
        compiler settings.
        options: A System.CodeDom.Compiler.CompilerParameters object that indicates the settings
        for compilation.
        compilationUnits: An array of type System.CodeDom.CodeCompileUnit that indicates the code to
        compile.
        Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
        compilation.
        """
        pass
    def CompileAssemblyFromFile(self, options, fileName):
        """
        CompileAssemblyFromFile(self: ICodeCompiler, options: CompilerParameters, fileName: str) -> CompilerResults
        Compiles an assembly from the source code contained within the specified file,
        using the specified compiler settings.
        options: A System.CodeDom.Compiler.CompilerParameters object that indicates the settings
        for compilation.
        fileName: The file name of the file that contains the source code to compile.
        Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
        compilation.
        """
        pass
    def CompileAssemblyFromFileBatch(self, options, fileNames):
        """
        CompileAssemblyFromFileBatch(self: ICodeCompiler, options: CompilerParameters, fileNames: Array[str]) -> CompilerResults
        Compiles an assembly from the source code contained within the specified files,
        using the specified compiler settings.
        options: A System.CodeDom.Compiler.CompilerParameters object that indicates the settings
        for compilation.
        fileNames: The file names of the files to compile.
        Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
        compilation.
        """
        pass
    def CompileAssemblyFromSource(self, options, source):
        """
        CompileAssemblyFromSource(self: ICodeCompiler, options: CompilerParameters, source: str) -> CompilerResults
        Compiles an assembly from the specified string containing source code, using
        the specified compiler settings.
        options: A System.CodeDom.Compiler.CompilerParameters object that indicates the settings
        for compilation.
        source: The source code to compile.
        Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
        compilation.
        """
        pass
    def CompileAssemblyFromSourceBatch(self, options, sources):
        """
        CompileAssemblyFromSourceBatch(self: ICodeCompiler, options: CompilerParameters, sources: Array[str]) -> CompilerResults
        Compiles an assembly from the specified array of strings containing source
        code, using the specified compiler settings.
        options: A System.CodeDom.Compiler.CompilerParameters object that indicates the settings
        for compilation.
        sources: The source code strings to compile.
        Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
        compilation.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        # NOTE(review): the stub generator repeated the docstring text twice;
        # collapsed to a single occurrence.
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class CodeCompiler(CodeGenerator, ICodeGenerator, ICodeCompiler):
""" Provides an example implementation of the System.CodeDom.Compiler.ICodeCompiler interface. """
def CmdArgsFromParameters(self, *args): #cannot find CLR method
"""
CmdArgsFromParameters(self: CodeCompiler, options: CompilerParameters) -> str
Gets the command arguments to be passed to the compiler from the specified
System.CodeDom.Compiler.CompilerParameters.
options: A System.CodeDom.Compiler.CompilerParameters that indicates the compiler
options.
Returns: The command arguments.
"""
pass
def ContinueOnNewLine(self, *args): #cannot find CLR method
"""
ContinueOnNewLine(self: CodeGenerator, st: str)
Generates a line-continuation character and outputs the specified string on a
new line.
st: The string to write on the new line.
"""
pass
def CreateEscapedIdentifier(self, *args): #cannot find CLR method
"""
CreateEscapedIdentifier(self: CodeGenerator, value: str) -> str
Creates an escaped identifier for the specified value.
value: The string to create an escaped identifier for.
Returns: The escaped identifier for the value.
"""
pass
def CreateValidIdentifier(self, *args): #cannot find CLR method
"""
CreateValidIdentifier(self: CodeGenerator, value: str) -> str
Creates a valid identifier for the specified value.
value: A string to create a valid identifier for.
Returns: A valid identifier for the value.
"""
pass
def FromDom(self, *args): #cannot find CLR method
"""
FromDom(self: CodeCompiler, options: CompilerParameters, e: CodeCompileUnit) -> CompilerResults
Compiles the specified compile unit using the specified options, and returns
the results from the compilation.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the compiler
options.
e: A System.CodeDom.CodeCompileUnit object that indicates the source to compile.
Returns: The results of compilation.
"""
pass
def FromDomBatch(self, *args): #cannot find CLR method
"""
FromDomBatch(self: CodeCompiler, options: CompilerParameters, ea: Array[CodeCompileUnit]) -> CompilerResults
Compiles the specified compile units using the specified options, and returns
the results from the compilation.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the compiler
options.
ea: An array of System.CodeDom.CodeCompileUnit objects that indicates the source to
compile.
Returns: The results of compilation.
"""
pass
def FromFile(self, *args): #cannot find CLR method
"""
FromFile(self: CodeCompiler, options: CompilerParameters, fileName: str) -> CompilerResults
Compiles the specified file using the specified options, and returns the
results from the compilation.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the compiler
options.
fileName: The file name to compile.
Returns: The results of compilation.
"""
pass
def FromFileBatch(self, *args): #cannot find CLR method
"""
FromFileBatch(self: CodeCompiler, options: CompilerParameters, fileNames: Array[str]) -> CompilerResults
Compiles the specified files using the specified options, and returns the
results from the compilation.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the compiler
options.
fileNames: An array of strings that indicates the file names of the files to compile.
Returns: The results of compilation.
"""
pass
def FromSource(self, *args): #cannot find CLR method
"""
FromSource(self: CodeCompiler, options: CompilerParameters, source: str) -> CompilerResults
Compiles the specified source code string using the specified options, and
returns the results from the compilation.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the compiler
options.
source: The source code string to compile.
Returns: The results of compilation.
"""
pass
def FromSourceBatch(self, *args): #cannot find CLR method
"""
FromSourceBatch(self: CodeCompiler, options: CompilerParameters, sources: Array[str]) -> CompilerResults
Compiles the specified source code strings using the specified options, and
returns the results from the compilation.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the compiler
options.
sources: An array of strings containing the source code to compile.
Returns: The results of compilation.
"""
pass
def GenerateArgumentReferenceExpression(self, *args): #cannot find CLR method
"""
GenerateArgumentReferenceExpression(self: CodeGenerator, e: CodeArgumentReferenceExpression)
Generates code for the specified argument reference expression.
e: A System.CodeDom.CodeArgumentReferenceExpression that indicates the expression
to generate code for.
"""
pass
def GenerateArrayCreateExpression(self, *args): #cannot find CLR method
"""
GenerateArrayCreateExpression(self: CodeGenerator, e: CodeArrayCreateExpression)
Generates code for the specified array creation expression.
e: A System.CodeDom.CodeArrayCreateExpression that indicates the expression to
generate code for.
"""
pass
# --- auto-generated CLR stubs: each def mirrors a CodeGenerator member; bodies are placeholders ---
def GenerateArrayIndexerExpression(self, *args): #cannot find CLR method
    """
    GenerateArrayIndexerExpression(self: CodeGenerator, e: CodeArrayIndexerExpression)
    Generates code for the specified array indexer expression.
    e: A System.CodeDom.CodeArrayIndexerExpression that indicates the expression to
    generate code for.
    """
    pass
def GenerateAssignStatement(self, *args): #cannot find CLR method
    """
    GenerateAssignStatement(self: CodeGenerator, e: CodeAssignStatement)
    Generates code for the specified assignment statement.
    e: A System.CodeDom.CodeAssignStatement that indicates the statement to generate
    code for.
    """
    pass
def GenerateAttachEventStatement(self, *args): #cannot find CLR method
    """
    GenerateAttachEventStatement(self: CodeGenerator, e: CodeAttachEventStatement)
    Generates code for the specified attach event statement.
    e: A System.CodeDom.CodeAttachEventStatement that indicates the statement to
    generate code for.
    """
    pass
def GenerateAttributeDeclarationsEnd(self, *args): #cannot find CLR method
    """
    GenerateAttributeDeclarationsEnd(self: CodeGenerator, attributes: CodeAttributeDeclarationCollection)
    Generates code for the specified attribute block end.
    attributes: A System.CodeDom.CodeAttributeDeclarationCollection that indicates the end of
    the attribute block to generate code for.
    """
    pass
def GenerateAttributeDeclarationsStart(self, *args): #cannot find CLR method
    """
    GenerateAttributeDeclarationsStart(self: CodeGenerator, attributes: CodeAttributeDeclarationCollection)
    Generates code for the specified attribute block start.
    attributes: A System.CodeDom.CodeAttributeDeclarationCollection that indicates the start of
    the attribute block to generate code for.
    """
    pass
def GenerateBaseReferenceExpression(self, *args): #cannot find CLR method
    """
    GenerateBaseReferenceExpression(self: CodeGenerator, e: CodeBaseReferenceExpression)
    Generates code for the specified base reference expression.
    e: A System.CodeDom.CodeBaseReferenceExpression that indicates the expression to
    generate code for.
    """
    pass
def GenerateBinaryOperatorExpression(self, *args): #cannot find CLR method
    """
    GenerateBinaryOperatorExpression(self: CodeGenerator, e: CodeBinaryOperatorExpression)
    Generates code for the specified binary operator expression.
    e: A System.CodeDom.CodeBinaryOperatorExpression that indicates the expression to
    generate code for.
    """
    pass
def GenerateCastExpression(self, *args): #cannot find CLR method
    """
    GenerateCastExpression(self: CodeGenerator, e: CodeCastExpression)
    Generates code for the specified cast expression.
    e: A System.CodeDom.CodeCastExpression that indicates the expression to generate
    code for.
    """
    pass
def GenerateComment(self, *args): #cannot find CLR method
    """
    GenerateComment(self: CodeGenerator, e: CodeComment)
    Generates code for the specified comment.
    e: A System.CodeDom.CodeComment to generate code for.
    """
    pass
def GenerateCommentStatement(self, *args): #cannot find CLR method
    """
    GenerateCommentStatement(self: CodeGenerator, e: CodeCommentStatement)
    Generates code for the specified comment statement.
    e: The statement to generate code for.
    """
    pass
def GenerateCommentStatements(self, *args): #cannot find CLR method
    """
    GenerateCommentStatements(self: CodeGenerator, e: CodeCommentStatementCollection)
    Generates code for the specified comment statements.
    e: The expression to generate code for.
    """
    pass
def GenerateCompileUnit(self, *args): #cannot find CLR method
    """
    GenerateCompileUnit(self: CodeGenerator, e: CodeCompileUnit)
    Generates code for the specified compile unit.
    e: The compile unit to generate code for.
    """
    pass
def GenerateCompileUnitEnd(self, *args): #cannot find CLR method
    """
    GenerateCompileUnitEnd(self: CodeGenerator, e: CodeCompileUnit)
    Generates code for the end of a compile unit.
    e: The compile unit to generate code for.
    """
    pass
def GenerateCompileUnitStart(self, *args): #cannot find CLR method
    """
    GenerateCompileUnitStart(self: CodeGenerator, e: CodeCompileUnit)
    Generates code for the start of a compile unit.
    e: The compile unit to generate code for.
    """
    pass
def GenerateConditionStatement(self, *args): #cannot find CLR method
    """
    GenerateConditionStatement(self: CodeGenerator, e: CodeConditionStatement)
    Generates code for the specified conditional statement.
    e: The statement to generate code for.
    """
    pass
# --- auto-generated CLR stubs: member, value and expression generators (placeholder bodies) ---
def GenerateConstructor(self, *args): #cannot find CLR method
    """
    GenerateConstructor(self: CodeGenerator, e: CodeConstructor, c: CodeTypeDeclaration)
    Generates code for the specified constructor.
    e: The constructor to generate code for.
    c: The type of the object that this constructor constructs.
    """
    pass
def GenerateDecimalValue(self, *args): #cannot find CLR method
    """
    GenerateDecimalValue(self: CodeGenerator, d: Decimal)
    Generates code for the specified decimal value.
    d: The decimal value to generate code for.
    """
    pass
def GenerateDefaultValueExpression(self, *args): #cannot find CLR method
    """
    GenerateDefaultValueExpression(self: CodeGenerator, e: CodeDefaultValueExpression)
    Generates code for the specified reference to a default value.
    e: The reference to generate code for.
    """
    pass
def GenerateDelegateCreateExpression(self, *args): #cannot find CLR method
    """
    GenerateDelegateCreateExpression(self: CodeGenerator, e: CodeDelegateCreateExpression)
    Generates code for the specified delegate creation expression.
    e: The expression to generate code for.
    """
    pass
def GenerateDelegateInvokeExpression(self, *args): #cannot find CLR method
    """
    GenerateDelegateInvokeExpression(self: CodeGenerator, e: CodeDelegateInvokeExpression)
    Generates code for the specified delegate invoke expression.
    e: The expression to generate code for.
    """
    pass
def GenerateDirectionExpression(self, *args): #cannot find CLR method
    """
    GenerateDirectionExpression(self: CodeGenerator, e: CodeDirectionExpression)
    Generates code for the specified direction expression.
    e: The expression to generate code for.
    """
    pass
def GenerateDirectives(self, *args): #cannot find CLR method
    """
    GenerateDirectives(self: CodeGenerator, directives: CodeDirectiveCollection)
    Generates code for the specified code directives.
    directives: The code directives to generate code for.
    """
    pass
def GenerateDoubleValue(self, *args): #cannot find CLR method
    """
    GenerateDoubleValue(self: CodeGenerator, d: float)
    Generates code for a double-precision floating point number.
    d: The value to generate code for.
    """
    pass
def GenerateEntryPointMethod(self, *args): #cannot find CLR method
    """
    GenerateEntryPointMethod(self: CodeGenerator, e: CodeEntryPointMethod, c: CodeTypeDeclaration)
    Generates code for the specified entry point method.
    e: The entry point for the code.
    c: The code that declares the type.
    """
    pass
def GenerateEvent(self, *args): #cannot find CLR method
    """
    GenerateEvent(self: CodeGenerator, e: CodeMemberEvent, c: CodeTypeDeclaration)
    Generates code for the specified event.
    e: The member event to generate code for.
    c: The type of the object that this event occurs on.
    """
    pass
def GenerateEventReferenceExpression(self, *args): #cannot find CLR method
    """
    GenerateEventReferenceExpression(self: CodeGenerator, e: CodeEventReferenceExpression)
    Generates code for the specified event reference expression.
    e: The expression to generate code for.
    """
    pass
def GenerateExpression(self, *args): #cannot find CLR method
    """
    GenerateExpression(self: CodeGenerator, e: CodeExpression)
    Generates code for the specified code expression.
    e: The code expression to generate code for.
    """
    pass
def GenerateExpressionStatement(self, *args): #cannot find CLR method
    """
    GenerateExpressionStatement(self: CodeGenerator, e: CodeExpressionStatement)
    Generates code for the specified expression statement.
    e: The statement to generate code for.
    """
    pass
def GenerateField(self, *args): #cannot find CLR method
    """
    GenerateField(self: CodeGenerator, e: CodeMemberField)
    Generates code for the specified member field.
    e: The field to generate code for.
    """
    pass
def GenerateFieldReferenceExpression(self, *args): #cannot find CLR method
    """
    GenerateFieldReferenceExpression(self: CodeGenerator, e: CodeFieldReferenceExpression)
    Generates code for the specified field reference expression.
    e: The expression to generate code for.
    """
    pass
def GenerateGotoStatement(self, *args): #cannot find CLR method
    """
    GenerateGotoStatement(self: CodeGenerator, e: CodeGotoStatement)
    Generates code for the specified goto statement.
    e: The expression to generate code for.
    """
    pass
# --- auto-generated CLR stubs: statement, pragma, method and namespace generators (placeholder bodies) ---
def GenerateIndexerExpression(self, *args): #cannot find CLR method
    """
    GenerateIndexerExpression(self: CodeGenerator, e: CodeIndexerExpression)
    Generates code for the specified indexer expression.
    e: The expression to generate code for.
    """
    pass
def GenerateIterationStatement(self, *args): #cannot find CLR method
    """
    GenerateIterationStatement(self: CodeGenerator, e: CodeIterationStatement)
    Generates code for the specified iteration statement.
    e: The statement to generate code for.
    """
    pass
def GenerateLabeledStatement(self, *args): #cannot find CLR method
    """
    GenerateLabeledStatement(self: CodeGenerator, e: CodeLabeledStatement)
    Generates code for the specified labeled statement.
    e: The statement to generate code for.
    """
    pass
def GenerateLinePragmaEnd(self, *args): #cannot find CLR method
    """
    GenerateLinePragmaEnd(self: CodeGenerator, e: CodeLinePragma)
    Generates code for the specified line pragma end.
    e: The end of the line pragma to generate code for.
    """
    pass
def GenerateLinePragmaStart(self, *args): #cannot find CLR method
    """
    GenerateLinePragmaStart(self: CodeGenerator, e: CodeLinePragma)
    Generates code for the specified line pragma start.
    e: The start of the line pragma to generate code for.
    """
    pass
def GenerateMethod(self, *args): #cannot find CLR method
    """
    GenerateMethod(self: CodeGenerator, e: CodeMemberMethod, c: CodeTypeDeclaration)
    Generates code for the specified method.
    e: The member method to generate code for.
    c: The type of the object that this method occurs on.
    """
    pass
def GenerateMethodInvokeExpression(self, *args): #cannot find CLR method
    """
    GenerateMethodInvokeExpression(self: CodeGenerator, e: CodeMethodInvokeExpression)
    Generates code for the specified method invoke expression.
    e: The expression to generate code for.
    """
    pass
def GenerateMethodReferenceExpression(self, *args): #cannot find CLR method
    """
    GenerateMethodReferenceExpression(self: CodeGenerator, e: CodeMethodReferenceExpression)
    Generates code for the specified method reference expression.
    e: The expression to generate code for.
    """
    pass
def GenerateMethodReturnStatement(self, *args): #cannot find CLR method
    """
    GenerateMethodReturnStatement(self: CodeGenerator, e: CodeMethodReturnStatement)
    Generates code for the specified method return statement.
    e: The statement to generate code for.
    """
    pass
def GenerateNamespace(self, *args): #cannot find CLR method
    """
    GenerateNamespace(self: CodeGenerator, e: CodeNamespace)
    Generates code for the specified namespace.
    e: The namespace to generate code for.
    """
    pass
def GenerateNamespaceEnd(self, *args): #cannot find CLR method
    """
    GenerateNamespaceEnd(self: CodeGenerator, e: CodeNamespace)
    Generates code for the end of a namespace.
    e: The namespace to generate code for.
    """
    pass
def GenerateNamespaceImport(self, *args): #cannot find CLR method
    """
    GenerateNamespaceImport(self: CodeGenerator, e: CodeNamespaceImport)
    Generates code for the specified namespace import.
    e: The namespace import to generate code for.
    """
    pass
def GenerateNamespaceImports(self, *args): #cannot find CLR method
    """
    GenerateNamespaceImports(self: CodeGenerator, e: CodeNamespace)
    Generates code for the specified namespace import.
    e: The namespace import to generate code for.
    """
    pass
def GenerateNamespaces(self, *args): #cannot find CLR method
    """
    GenerateNamespaces(self: CodeGenerator, e: CodeCompileUnit)
    Generates code for the namespaces in the specified compile unit.
    e: The compile unit to generate namespaces for.
    """
    pass
def GenerateNamespaceStart(self, *args): #cannot find CLR method
    """
    GenerateNamespaceStart(self: CodeGenerator, e: CodeNamespace)
    Generates code for the start of a namespace.
    e: The namespace to generate code for.
    """
    pass
def GenerateObjectCreateExpression(self, *args): #cannot find CLR method
    """
    GenerateObjectCreateExpression(self: CodeGenerator, e: CodeObjectCreateExpression)
    Generates code for the specified object creation expression.
    e: The expression to generate code for.
    """
    pass
def GenerateParameterDeclarationExpression(self, *args): #cannot find CLR method
    """
    GenerateParameterDeclarationExpression(self: CodeGenerator, e: CodeParameterDeclarationExpression)
    Generates code for the specified parameter declaration expression.
    e: The expression to generate code for.
    """
    pass
def GeneratePrimitiveExpression(self, *args): #cannot find CLR method
    """
    GeneratePrimitiveExpression(self: CodeGenerator, e: CodePrimitiveExpression)
    Generates code for the specified primitive expression.
    e: The expression to generate code for.
    """
    pass
# --- auto-generated CLR stubs: property, snippet and type generators (placeholder bodies) ---
def GenerateProperty(self, *args): #cannot find CLR method
    """
    GenerateProperty(self: CodeGenerator, e: CodeMemberProperty, c: CodeTypeDeclaration)
    Generates code for the specified property.
    e: The property to generate code for.
    c: The type of the object that this property occurs on.
    """
    pass
def GeneratePropertyReferenceExpression(self, *args): #cannot find CLR method
    """
    GeneratePropertyReferenceExpression(self: CodeGenerator, e: CodePropertyReferenceExpression)
    Generates code for the specified property reference expression.
    e: The expression to generate code for.
    """
    pass
def GeneratePropertySetValueReferenceExpression(self, *args): #cannot find CLR method
    """
    GeneratePropertySetValueReferenceExpression(self: CodeGenerator, e: CodePropertySetValueReferenceExpression)
    Generates code for the specified property set value reference expression.
    e: The expression to generate code for.
    """
    pass
def GenerateRemoveEventStatement(self, *args): #cannot find CLR method
    """
    GenerateRemoveEventStatement(self: CodeGenerator, e: CodeRemoveEventStatement)
    Generates code for the specified remove event statement.
    e: The statement to generate code for.
    """
    pass
def GenerateSingleFloatValue(self, *args): #cannot find CLR method
    """
    GenerateSingleFloatValue(self: CodeGenerator, s: Single)
    Generates code for a single-precision floating point number.
    s: The value to generate code for.
    """
    pass
def GenerateSnippetCompileUnit(self, *args): #cannot find CLR method
    """
    GenerateSnippetCompileUnit(self: CodeGenerator, e: CodeSnippetCompileUnit)
    Outputs the code of the specified literal code fragment compile unit.
    e: The literal code fragment compile unit to generate code for.
    """
    pass
def GenerateSnippetExpression(self, *args): #cannot find CLR method
    """
    GenerateSnippetExpression(self: CodeGenerator, e: CodeSnippetExpression)
    Outputs the code of the specified literal code fragment expression.
    e: The expression to generate code for.
    """
    pass
def GenerateSnippetMember(self, *args): #cannot find CLR method
    """
    GenerateSnippetMember(self: CodeGenerator, e: CodeSnippetTypeMember)
    Outputs the code of the specified literal code fragment class member.
    e: The member to generate code for.
    """
    pass
def GenerateSnippetStatement(self, *args): #cannot find CLR method
    """
    GenerateSnippetStatement(self: CodeGenerator, e: CodeSnippetStatement)
    Outputs the code of the specified literal code fragment statement.
    e: The statement to generate code for.
    """
    pass
def GenerateStatement(self, *args): #cannot find CLR method
    """
    GenerateStatement(self: CodeGenerator, e: CodeStatement)
    Generates code for the specified statement.
    e: The statement to generate code for.
    """
    pass
def GenerateStatements(self, *args): #cannot find CLR method
    """
    GenerateStatements(self: CodeGenerator, stms: CodeStatementCollection)
    Generates code for the specified statement collection.
    stms: The statements to generate code for.
    """
    pass
def GenerateThisReferenceExpression(self, *args): #cannot find CLR method
    """
    GenerateThisReferenceExpression(self: CodeGenerator, e: CodeThisReferenceExpression)
    Generates code for the specified this reference expression.
    e: The expression to generate code for.
    """
    pass
def GenerateThrowExceptionStatement(self, *args): #cannot find CLR method
    """
    GenerateThrowExceptionStatement(self: CodeGenerator, e: CodeThrowExceptionStatement)
    Generates code for the specified throw exception statement.
    e: The statement to generate code for.
    """
    pass
def GenerateTryCatchFinallyStatement(self, *args): #cannot find CLR method
    """
    GenerateTryCatchFinallyStatement(self: CodeGenerator, e: CodeTryCatchFinallyStatement)
    Generates code for the specified try...catch...finally statement.
    e: The statement to generate code for.
    """
    pass
def GenerateTypeConstructor(self, *args): #cannot find CLR method
    """
    GenerateTypeConstructor(self: CodeGenerator, e: CodeTypeConstructor)
    Generates code for the specified class constructor.
    e: The class constructor to generate code for.
    """
    pass
def GenerateTypeEnd(self, *args): #cannot find CLR method
    """
    GenerateTypeEnd(self: CodeGenerator, e: CodeTypeDeclaration)
    Generates code for the specified end of the class.
    e: The end of the class to generate code for.
    """
    pass
def GenerateTypeOfExpression(self, *args): #cannot find CLR method
    """
    GenerateTypeOfExpression(self: CodeGenerator, e: CodeTypeOfExpression)
    Generates code for the specified type of expression.
    e: The expression to generate code for.
    """
    pass
def GenerateTypeReferenceExpression(self, *args): #cannot find CLR method
    """
    GenerateTypeReferenceExpression(self: CodeGenerator, e: CodeTypeReferenceExpression)
    Generates code for the specified type reference expression.
    e: The expression to generate code for.
    """
    pass
# --- auto-generated CLR stubs: type/variable generators and compiler helpers (placeholder bodies) ---
def GenerateTypes(self, *args): #cannot find CLR method
    """
    GenerateTypes(self: CodeGenerator, e: CodeNamespace)
    Generates code for the specified namespace and the classes it contains.
    e: The namespace to generate classes for.
    """
    pass
def GenerateTypeStart(self, *args): #cannot find CLR method
    """
    GenerateTypeStart(self: CodeGenerator, e: CodeTypeDeclaration)
    Generates code for the specified start of the class.
    e: The start of the class to generate code for.
    """
    pass
def GenerateVariableDeclarationStatement(self, *args): #cannot find CLR method
    """
    GenerateVariableDeclarationStatement(self: CodeGenerator, e: CodeVariableDeclarationStatement)
    Generates code for the specified variable declaration statement.
    e: The statement to generate code for.
    """
    pass
def GenerateVariableReferenceExpression(self, *args): #cannot find CLR method
    """
    GenerateVariableReferenceExpression(self: CodeGenerator, e: CodeVariableReferenceExpression)
    Generates code for the specified variable reference expression.
    e: The expression to generate code for.
    """
    pass
def GetResponseFileCmdArgs(self, *args): #cannot find CLR method
    """
    GetResponseFileCmdArgs(self: CodeCompiler, options: CompilerParameters, cmdArgs: str) -> str
    Gets the command arguments to use when invoking the compiler to generate a
    response file.
    options: A System.CodeDom.Compiler.CompilerParameters object that indicates the compiler
    options.
    cmdArgs: A command arguments string.
    Returns: The command arguments to use to generate a response file, or null if there are
    no response file arguments.
    """
    pass
def GetTypeOutput(self, *args): #cannot find CLR method
    """
    GetTypeOutput(self: CodeGenerator, value: CodeTypeReference) -> str
    Gets the name of the specified data type.
    value: The type whose name will be returned.
    Returns: The name of the data type reference.
    """
    pass
def IsValidIdentifier(self, *args): #cannot find CLR method
    """
    IsValidIdentifier(self: CodeGenerator, value: str) -> bool
    Gets a value indicating whether the specified value is a valid identifier.
    value: The value to test for conflicts with valid identifiers.
    Returns: true if the value is a valid identifier; otherwise, false.
    """
    pass
def JoinStringArray(self, *args): # CLR method not resolvable; generated stub
    """Concatenate an array of strings using a separator.

    CLR signature: JoinStringArray(sa: Array[str], separator: str) -> str

    sa: the array of strings to join.
    separator: the string inserted between consecutive elements.
    Returns: the concatenated string.
    """
    pass
# --- auto-generated CLR stubs: Output* helpers and compiler post-processing (placeholder bodies) ---
def OutputAttributeArgument(self, *args): #cannot find CLR method
    """
    OutputAttributeArgument(self: CodeGenerator, arg: CodeAttributeArgument)
    Outputs an argument in an attribute block.
    arg: The attribute argument to generate code for.
    """
    pass
def OutputAttributeDeclarations(self, *args): #cannot find CLR method
    """
    OutputAttributeDeclarations(self: CodeGenerator, attributes: CodeAttributeDeclarationCollection)
    Generates code for the specified attribute declaration collection.
    attributes: The attributes to generate code for.
    """
    pass
def OutputDirection(self, *args): #cannot find CLR method
    """
    OutputDirection(self: CodeGenerator, dir: FieldDirection)
    Generates code for the specified System.CodeDom.FieldDirection.
    dir: One of the enumeration values that indicates the attribute of the field.
    """
    pass
def OutputExpressionList(self, *args): #cannot find CLR method
    """
    OutputExpressionList(self: CodeGenerator, expressions: CodeExpressionCollection, newlineBetweenItems: bool)
    Generates code for the specified expression list.
    expressions: The expressions to generate code for.
    newlineBetweenItems: true to insert a new line after each item; otherwise, false.
    OutputExpressionList(self: CodeGenerator, expressions: CodeExpressionCollection)
    Generates code for the specified expression list.
    expressions: The expressions to generate code for.
    """
    pass
def OutputFieldScopeModifier(self, *args): #cannot find CLR method
    """
    OutputFieldScopeModifier(self: CodeGenerator, attributes: MemberAttributes)
    Outputs a field scope modifier that corresponds to the specified attributes.
    attributes: One of the enumeration values that specifies the attributes.
    """
    pass
def OutputIdentifier(self, *args): #cannot find CLR method
    """
    OutputIdentifier(self: CodeGenerator, ident: str)
    Outputs the specified identifier.
    ident: The identifier to output.
    """
    pass
def OutputMemberAccessModifier(self, *args): #cannot find CLR method
    """
    OutputMemberAccessModifier(self: CodeGenerator, attributes: MemberAttributes)
    Generates code for the specified member access modifier.
    attributes: One of the enumeration values that indicates the member access modifier to
    generate code for.
    """
    pass
def OutputMemberScopeModifier(self, *args): #cannot find CLR method
    """
    OutputMemberScopeModifier(self: CodeGenerator, attributes: MemberAttributes)
    Generates code for the specified member scope modifier.
    attributes: One of the enumeration values that indicates the member scope modifier to
    generate code for.
    """
    pass
def OutputOperator(self, *args): #cannot find CLR method
    """
    OutputOperator(self: CodeGenerator, op: CodeBinaryOperatorType)
    Generates code for the specified operator.
    op: The operator to generate code for.
    """
    pass
def OutputParameters(self, *args): #cannot find CLR method
    """
    OutputParameters(self: CodeGenerator, parameters: CodeParameterDeclarationExpressionCollection)
    Generates code for the specified parameters.
    parameters: The parameter declaration expressions to generate code for.
    """
    pass
def OutputType(self, *args): #cannot find CLR method
    """
    OutputType(self: CodeGenerator, typeRef: CodeTypeReference)
    Generates code for the specified type.
    typeRef: The type to generate code for.
    """
    pass
def OutputTypeAttributes(self, *args): #cannot find CLR method
    """
    OutputTypeAttributes(self: CodeGenerator, attributes: TypeAttributes, isStruct: bool, isEnum: bool)
    Generates code for the specified type attributes.
    attributes: One of the enumeration values that indicates the type attributes to generate
    code for.
    isStruct: true if the type is a struct; otherwise, false.
    isEnum: true if the type is an enum; otherwise, false.
    """
    pass
def OutputTypeNamePair(self, *args): #cannot find CLR method
    """
    OutputTypeNamePair(self: CodeGenerator, typeRef: CodeTypeReference, name: str)
    Generates code for the specified object type and name pair.
    typeRef: The type.
    name: The name for the object.
    """
    pass
def ProcessCompilerOutputLine(self, *args): #cannot find CLR method
    """
    ProcessCompilerOutputLine(self: CodeCompiler, results: CompilerResults, line: str)
    Processes the specified line from the specified
    System.CodeDom.Compiler.CompilerResults.
    results: A System.CodeDom.Compiler.CompilerResults that indicates the results of
    compilation.
    line: The line to process.
    """
    pass
def QuoteSnippetString(self, *args): #cannot find CLR method
    """
    QuoteSnippetString(self: CodeGenerator, value: str) -> str
    Converts the specified string by formatting it with escape codes.
    value: The string to convert.
    Returns: The converted string.
    """
    pass
def Supports(self, *args): # CLR method not resolvable; generated stub
    """Report whether a given kind of code-generation support is available.

    CLR signature: Supports(self: CodeGenerator, support: GeneratorSupport) -> bool

    support: the GeneratorSupport flag to query.
    Returns: true when the generator provides the requested support,
    false otherwise.
    """
    pass
def ValidateIdentifier(self, *args): #cannot find CLR method
    """
    ValidateIdentifier(self: CodeGenerator, value: str)
    Throws an exception if the specified string is not a valid identifier.
    value: The identifier to test for validity as an identifier.
    """
    pass
def __init__(self, *args): #cannot find CLR method
    # Generated docstring repeated the same sentence three times; deduplicated.
    """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
    pass
# Generated read-mostly CLR property placeholders: getter/setter/deleter lambdas are
# stand-ins for the real CLR accessors; the bare strings below each one are the
# generated attribute descriptions.
CompilerName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the name of the compiler executable.
"""
CurrentClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the code type declaration for the current class.
"""
CurrentMember = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the current member of the class.
"""
CurrentMemberName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the current member name.
"""
CurrentTypeName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the current class name.
"""
FileExtension = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the file name extension to use for source files.
"""
Indent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the amount of spaces to indent each indentation level.
"""
IsCurrentClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is a class.
"""
IsCurrentDelegate = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is a delegate.
"""
IsCurrentEnum = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is an enumeration.
"""
IsCurrentInterface = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is an interface.
"""
IsCurrentStruct = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the current object being generated is a value type or struct.
"""
NullToken = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the token that represents null.
"""
Options = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the options to be used by the code generator.
"""
Output = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the text writer to use for output.
"""
class CodeDomProvider(Component, IComponent, IDisposable):
""" Provides a base class for System.CodeDom.Compiler.CodeDomProvider implementations. This class is abstract. """
def CompileAssemblyFromDom(self, options, compilationUnits):
"""
CompileAssemblyFromDom(self: CodeDomProvider, options: CompilerParameters, *compilationUnits: Array[CodeCompileUnit]) -> CompilerResults
Compiles an assembly based on the System.CodeDom trees contained in the
specified array of System.CodeDom.CodeCompileUnit objects, using the specified
compiler settings.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the settings
for the compilation.
compilationUnits: An array of type System.CodeDom.CodeCompileUnit that indicates the code to
compile.
Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
the compilation.
"""
pass
def CompileAssemblyFromFile(self, options, fileNames):
"""
CompileAssemblyFromFile(self: CodeDomProvider, options: CompilerParameters, *fileNames: Array[str]) -> CompilerResults
Compiles an assembly from the source code contained in the specified files,
using the specified compiler settings.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the settings
for the compilation.
fileNames: An array of the names of the files to compile.
Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
compilation.
"""
pass
def CompileAssemblyFromSource(self, options, sources):
"""
CompileAssemblyFromSource(self: CodeDomProvider, options: CompilerParameters, *sources: Array[str]) -> CompilerResults
Compiles an assembly from the specified array of strings containing source
code, using the specified compiler settings.
options: A System.CodeDom.Compiler.CompilerParameters object that indicates the compiler
settings for this compilation.
sources: An array of source code strings to compile.
Returns: A System.CodeDom.Compiler.CompilerResults object that indicates the results of
compilation.
"""
pass
def CreateCompiler(self):
"""
CreateCompiler(self: CodeDomProvider) -> ICodeCompiler
When overridden in a derived class, creates a new code compiler.
Returns: An System.CodeDom.Compiler.ICodeCompiler that can be used for compilation of
System.CodeDom based source code representations.
"""
pass
def CreateEscapedIdentifier(self, value):
"""
CreateEscapedIdentifier(self: CodeDomProvider, value: str) -> str
Creates an escaped identifier for the specified value.
value: The string for which to create an escaped identifier.
Returns: The escaped identifier for the value.
"""
pass
def CreateGenerator(self, *__args):
"""
CreateGenerator(self: CodeDomProvider, fileName: str) -> ICodeGenerator
When overridden in a derived class, creates a new code generator using the
specified file name for output.
fileName: The file name to output to.
Returns: An System.CodeDom.Compiler.ICodeGenerator that can be used to generate
System.CodeDom based source code representations.
CreateGenerator(self: CodeDomProvider, output: TextWriter) -> ICodeGenerator
When overridden in a derived class, creates a new code generator using the
specified System.IO.TextWriter for output.
output: A System.IO.TextWriter to use to output.
Returns: An System.CodeDom.Compiler.ICodeGenerator that can be used to generate
System.CodeDom based source code representations.
CreateGenerator(self: CodeDomProvider) -> ICodeGenerator
When overridden in a derived class, creates a new code generator.
Returns: An System.CodeDom.Compiler.ICodeGenerator that can be used to generate
System.CodeDom based source code representations.
"""
pass
def CreateParser(self):
"""
CreateParser(self: CodeDomProvider) -> ICodeParser
When overridden in a derived class, creates a new code parser.
Returns: An System.CodeDom.Compiler.ICodeParser that can be used to parse source code.
The base implementation always returns null.
"""
pass
@staticmethod
def CreateProvider(language, providerOptions=None):
"""
CreateProvider(language: str) -> CodeDomProvider
Gets a System.CodeDom.Compiler.CodeDomProvider instance for the specified
language.
language: The language name.
Returns: A CodeDOM provider that is implemented for the specified language name.
CreateProvider(language: str, providerOptions: IDictionary[str, str]) -> CodeDomProvider
"""
pass
def CreateValidIdentifier(self, value):
"""
CreateValidIdentifier(self: CodeDomProvider, value: str) -> str
Creates a valid identifier for the specified value.
value: The string for which to generate a valid identifier.
Returns: A valid identifier for the specified value.
"""
pass
def Dispose(self):
"""
Dispose(self: Component, disposing: bool)
Releases the unmanaged resources used by the System.ComponentModel.Component
and optionally releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only
unmanaged resources.
"""
pass
def GenerateCodeFromCompileUnit(self, compileUnit, writer, options):
"""
GenerateCodeFromCompileUnit(self: CodeDomProvider, compileUnit: CodeCompileUnit, writer: TextWriter, options: CodeGeneratorOptions)
Generates code for the specified Code Document Object Model (CodeDOM)
compilation unit and sends it to the specified text writer, using the specified
options.
compileUnit: A System.CodeDom.CodeCompileUnit for which to generate code.
writer: The System.IO.TextWriter to which the output code is sent.
options: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the options to
use for generating code.
"""
pass
def GenerateCodeFromExpression(self, expression, writer, options):
    """
    GenerateCodeFromExpression(self: CodeDomProvider, expression: CodeExpression, writer: TextWriter, options: CodeGeneratorOptions)
    Generates code for the specified Code Document Object Model (CodeDOM)
    expression and sends it to the specified text writer, using the specified
    options.
    expression: A System.CodeDom.CodeExpression object that indicates the expression for which
    to generate code.
    writer: The System.IO.TextWriter to which output code is sent.
    options: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the options to
    use for generating code.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def GenerateCodeFromMember(self, member, writer, options):
    """
    GenerateCodeFromMember(self: CodeDomProvider, member: CodeTypeMember, writer: TextWriter, options: CodeGeneratorOptions)
    Generates code for the specified Code Document Object Model (CodeDOM) member
    declaration and sends it to the specified text writer, using the specified
    options.
    member: A System.CodeDom.CodeTypeMember object that indicates the member for which to
    generate code.
    writer: The System.IO.TextWriter to which output code is sent.
    options: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the options to
    use for generating code.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def GenerateCodeFromNamespace(self, codeNamespace, writer, options):
    """
    GenerateCodeFromNamespace(self: CodeDomProvider, codeNamespace: CodeNamespace, writer: TextWriter, options: CodeGeneratorOptions)
    Generates code for the specified Code Document Object Model (CodeDOM) namespace
    and sends it to the specified text writer, using the specified options.
    codeNamespace: A System.CodeDom.CodeNamespace object that indicates the namespace for which to
    generate code.
    writer: The System.IO.TextWriter to which output code is sent.
    options: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the options to
    use for generating code.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def GenerateCodeFromStatement(self, statement, writer, options):
    """
    GenerateCodeFromStatement(self: CodeDomProvider, statement: CodeStatement, writer: TextWriter, options: CodeGeneratorOptions)
    Generates code for the specified Code Document Object Model (CodeDOM) statement
    and sends it to the specified text writer, using the specified options.
    statement: A System.CodeDom.CodeStatement containing the CodeDOM elements for which to
    generate code.
    writer: The System.IO.TextWriter to which output code is sent.
    options: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the options to
    use for generating code.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def GenerateCodeFromType(self, codeType, writer, options):
    """
    GenerateCodeFromType(self: CodeDomProvider, codeType: CodeTypeDeclaration, writer: TextWriter, options: CodeGeneratorOptions)
    Generates code for the specified Code Document Object Model (CodeDOM) type
    declaration and sends it to the specified text writer, using the specified
    options.
    codeType: A System.CodeDom.CodeTypeDeclaration object that indicates the type for which
    to generate code.
    writer: The System.IO.TextWriter to which output code is sent.
    options: A System.CodeDom.Compiler.CodeGeneratorOptions that indicates the options to
    use for generating code.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
@staticmethod
def GetAllCompilerInfo():
    """
    GetAllCompilerInfo() -> Array[CompilerInfo]
    Returns the language provider and compiler configuration settings for this
    computer.
    Returns: An array of type System.CodeDom.Compiler.CompilerInfo representing the settings
    of all configured System.CodeDom.Compiler.CodeDomProvider implementations.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
@staticmethod
def GetCompilerInfo(language):
    """
    GetCompilerInfo(language: str) -> CompilerInfo
    Returns the language provider and compiler configuration settings for the
    specified language.
    language: A language name.
    Returns: A System.CodeDom.Compiler.CompilerInfo object populated with settings of the
    configured System.CodeDom.Compiler.CodeDomProvider implementation.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def GetConverter(self, type):
    """
    GetConverter(self: CodeDomProvider, type: Type) -> TypeConverter
    Gets a System.ComponentModel.TypeConverter for the specified data type.
    type: The type of object to retrieve a type converter for.
    Returns: A System.ComponentModel.TypeConverter for the specified type, or null if a
    System.ComponentModel.TypeConverter for the specified type cannot be found.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
@staticmethod
def GetLanguageFromExtension(extension):
    """
    GetLanguageFromExtension(extension: str) -> str
    Returns a language name associated with the specified file name extension, as
    configured in the System.CodeDom.Compiler.CodeDomProvider compiler
    configuration section.
    extension: A file name extension.
    Returns: A language name associated with the file name extension, as configured in the
    System.CodeDom.Compiler.CodeDomProvider compiler configuration settings.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def GetService(self, *args): #cannot find CLR method
    """
    GetService(self: Component, service: Type) -> object
    Returns an object that represents a service provided by the
    System.ComponentModel.Component or by its System.ComponentModel.Container.
    service: A service provided by the System.ComponentModel.Component.
    Returns: An System.Object that represents a service provided by the
    System.ComponentModel.Component, or null if the System.ComponentModel.Component
    does not provide the specified service.
    """
    # Generated stub: no matching CLR overload was resolved by the generator.
    pass
def GetTypeOutput(self, type):
    """
    GetTypeOutput(self: CodeDomProvider, type: CodeTypeReference) -> str
    Gets the type indicated by the specified System.CodeDom.CodeTypeReference.
    type: A System.CodeDom.CodeTypeReference that indicates the type to return.
    Returns: A text representation of the specified type, formatted for the language in
    which code is generated by this code generator. In Visual Basic, for example,
    passing in a System.CodeDom.CodeTypeReference for the System.Int32 type will
    return "Integer".
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
@staticmethod
def IsDefinedExtension(extension):
    """
    IsDefinedExtension(extension: str) -> bool
    Tests whether a file name extension has an associated
    System.CodeDom.Compiler.CodeDomProvider implementation configured on the
    computer.
    extension: A file name extension.
    Returns: true if a System.CodeDom.Compiler.CodeDomProvider implementation is configured
    for the specified file name extension; otherwise, false.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
@staticmethod
def IsDefinedLanguage(language):
    """
    IsDefinedLanguage(language: str) -> bool
    Tests whether a language has a System.CodeDom.Compiler.CodeDomProvider
    implementation configured on the computer.
    language: The language name.
    Returns: true if a System.CodeDom.Compiler.CodeDomProvider implementation is configured
    for the specified language; otherwise, false.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def IsValidIdentifier(self, value):
    """
    IsValidIdentifier(self: CodeDomProvider, value: str) -> bool
    Returns a value that indicates whether the specified value is a valid
    identifier for the current language.
    value: The value to verify as a valid identifier.
    Returns: true if the value parameter is a valid identifier; otherwise, false.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def MemberwiseClone(self, *args): #cannot find CLR method
    """
    MemberwiseClone(self: MarshalByRefObject, cloneIdentity: bool) -> MarshalByRefObject
    Creates a shallow copy of the current System.MarshalByRefObject object.
    cloneIdentity: false to delete the current System.MarshalByRefObject object's identity, which
    will cause the object to be assigned a new identity when it is marshaled across
    a remoting boundary. A value of false is usually appropriate. true to copy the
    current System.MarshalByRefObject object's identity to its clone, which will
    cause remoting client calls to be routed to the remote server object.
    Returns: A shallow copy of the current System.MarshalByRefObject object.
    MemberwiseClone(self: object) -> object
    Creates a shallow copy of the current System.Object.
    Returns: A shallow copy of the current System.Object.
    """
    # Generated stub: no matching CLR overload was resolved by the generator.
    pass
def Parse(self, codeStream):
    """
    Parse(self: CodeDomProvider, codeStream: TextReader) -> CodeCompileUnit
    Compiles the code read from the specified text stream into a
    System.CodeDom.CodeCompileUnit.
    codeStream: A System.IO.TextReader object that is used to read the code to be parsed.
    Returns: A System.CodeDom.CodeCompileUnit that contains a representation of the parsed
    code.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def Supports(self, generatorSupport):
    """
    Supports(self: CodeDomProvider, generatorSupport: GeneratorSupport) -> bool
    Returns a value indicating whether the specified code generation support is
    provided.
    generatorSupport: A System.CodeDom.Compiler.GeneratorSupport object that indicates the type of
    code generation support to verify.
    Returns: true if the specified code generation support is provided; otherwise, false.
    """
    # Generated stub: the concrete implementation is supplied by the CLR type.
    pass
def __enter__(self, *args): #cannot find CLR method
    """ __enter__(self: IDisposable) -> object """
    # Context-manager entry mapped onto the .NET IDisposable pattern.
    pass
def __exit__(self, *args): #cannot find CLR method
    """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
    # Context-manager exit mapped onto the .NET IDisposable pattern.
    pass
def __init__(self, *args): #cannot find CLR method
    # NOTE: the generator emitted this signature note three times; collapsed to one.
    """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
    pass
def __str__(self, *args): #cannot find CLR method
    """ __str__(self: object) -> str """
    pass
# Properties inherited from Component plus CodeDomProvider's own. The
# lambda-based property objects are auto-generated placeholders; the real
# accessors are provided by the underlying CLR implementation.
CanRaiseEvents = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether the component can raise an event.
"""
DesignMode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
Events = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
FileExtension = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the default file name extension to use for source code files in the current language.
Get: FileExtension(self: CodeDomProvider) -> str
"""
LanguageOptions = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a language features identifier.
Get: LanguageOptions(self: CodeDomProvider) -> LanguageOptions
"""
class CodeGeneratorOptions(object):
    """
    Represents a set of options used by a code generator.
    CodeGeneratorOptions()
    """
    # Generated stub: option values are exposed both as named properties and via
    # string-keyed indexing (__getitem__/__setitem__).
    def __getitem__(self, *args): #cannot find CLR method
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __setitem__(self, *args): #cannot find CLR method
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    BlankLinesBetweenMembers = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether to insert blank lines between members.
    Get: BlankLinesBetweenMembers(self: CodeGeneratorOptions) -> bool
    Set: BlankLinesBetweenMembers(self: CodeGeneratorOptions) = value
    """
    BracingStyle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the style to use for bracing.
    Get: BracingStyle(self: CodeGeneratorOptions) -> str
    Set: BracingStyle(self: CodeGeneratorOptions) = value
    """
    ElseOnClosing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether to append an else, catch, or finally block, including brackets, at the closing line of each previous if or try block.
    Get: ElseOnClosing(self: CodeGeneratorOptions) -> bool
    Set: ElseOnClosing(self: CodeGeneratorOptions) = value
    """
    IndentString = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the string to use for indentations.
    Get: IndentString(self: CodeGeneratorOptions) -> str
    Set: IndentString(self: CodeGeneratorOptions) = value
    """
    VerbatimOrder = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether to generate members in the order in which they occur in member collections.
    Get: VerbatimOrder(self: CodeGeneratorOptions) -> bool
    Set: VerbatimOrder(self: CodeGeneratorOptions) = value
    """
class CodeParser(object, ICodeParser):
    """ Provides an empty implementation of the System.CodeDom.Compiler.ICodeParser interface. """
    def Parse(self, codeStream):
        """
        Parse(self: CodeParser, codeStream: TextReader) -> CodeCompileUnit
        Compiles the specified text stream into a System.CodeDom.CodeCompileUnit.
        codeStream: A System.IO.TextReader that is used to read the code to be parsed.
        Returns: A System.CodeDom.CodeCompileUnit containing the code model produced from
        parsing the code.
        """
        # Generated stub: the concrete implementation is supplied by the CLR type.
        pass
    def __init__(self, *args): #cannot find CLR method
        # NOTE: the generator emitted this signature note three times; collapsed to one.
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self, *args): #cannot find CLR method
        """ __repr__(self: object) -> str """
        pass
class CompilerError(object):
    """
    Represents a compiler error or warning.
    CompilerError()
    CompilerError(fileName: str, line: int, column: int, errorNumber: str, errorText: str)
    """
    def ToString(self):
        """
        ToString(self: CompilerError) -> str
        Provides an implementation of Object's System.Object.ToString method.
        Returns: A string representation of the compiler error.
        """
        # Generated stub: the concrete implementation is supplied by the CLR type.
        pass
    @staticmethod # known case of __new__
    def __new__(self, fileName=None, line=None, column=None, errorNumber=None, errorText=None):
        """
        __new__(cls: type)
        __new__(cls: type, fileName: str, line: int, column: int, errorNumber: str, errorText: str)
        """
        # Defaulted parameters model the two CLR constructor overloads above.
        pass
    Column = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the column number where the source of the error occurs.
    Get: Column(self: CompilerError) -> int
    Set: Column(self: CompilerError) = value
    """
    ErrorNumber = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the error number.
    Get: ErrorNumber(self: CompilerError) -> str
    Set: ErrorNumber(self: CompilerError) = value
    """
    ErrorText = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the text of the error message.
    Get: ErrorText(self: CompilerError) -> str
    Set: ErrorText(self: CompilerError) = value
    """
    FileName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the file name of the source file that contains the code which caused the error.
    Get: FileName(self: CompilerError) -> str
    Set: FileName(self: CompilerError) = value
    """
    IsWarning = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value that indicates whether the error is a warning.
    Get: IsWarning(self: CompilerError) -> bool
    Set: IsWarning(self: CompilerError) = value
    """
    Line = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the line number where the source of the error occurs.
    Get: Line(self: CompilerError) -> int
    Set: Line(self: CompilerError) = value
    """
class CompilerErrorCollection(CollectionBase, IList, ICollection, IEnumerable):
    """
    Represents a collection of System.CodeDom.Compiler.CompilerError objects.
    CompilerErrorCollection()
    CompilerErrorCollection(value: CompilerErrorCollection)
    CompilerErrorCollection(value: Array[CompilerError])
    """
    # Generated stub: strongly-typed collection built on CollectionBase; the
    # On* hooks below are CollectionBase extension points, not public API.
    def Add(self, value):
        """
        Add(self: CompilerErrorCollection, value: CompilerError) -> int
        Adds the specified System.CodeDom.Compiler.CompilerError object to the error
        collection.
        value: The System.CodeDom.Compiler.CompilerError object to add.
        Returns: The index at which the new element was inserted.
        """
        pass
    def AddRange(self, value):
        """
        AddRange(self: CompilerErrorCollection, value: CompilerErrorCollection)
        Adds the contents of the specified compiler error collection to the end of the
        error collection.
        value: A System.CodeDom.Compiler.CompilerErrorCollection object that contains the
        objects to add to the collection.
        AddRange(self: CompilerErrorCollection, value: Array[CompilerError])
        Copies the elements of an array to the end of the error collection.
        value: An array of type System.CodeDom.Compiler.CompilerError that contains the
        objects to add to the collection.
        """
        pass
    def Contains(self, value):
        """
        Contains(self: CompilerErrorCollection, value: CompilerError) -> bool
        Gets a value that indicates whether the collection contains the specified
        System.CodeDom.Compiler.CompilerError object.
        value: The System.CodeDom.Compiler.CompilerError to locate.
        Returns: true if the System.CodeDom.Compiler.CompilerError is contained in the
        collection; otherwise, false.
        """
        pass
    def CopyTo(self, array, index):
        """
        CopyTo(self: CompilerErrorCollection, array: Array[CompilerError], index: int)
        Copies the collection values to a one-dimensional System.Array instance at the
        specified index.
        array: The one-dimensional System.Array that is the destination of the values copied
        from System.CodeDom.Compiler.CompilerErrorCollection.
        index: The index in the array at which to start copying.
        """
        pass
    def IndexOf(self, value):
        """
        IndexOf(self: CompilerErrorCollection, value: CompilerError) -> int
        Gets the index of the specified System.CodeDom.Compiler.CompilerError object in
        the collection, if it exists in the collection.
        value: The System.CodeDom.Compiler.CompilerError to locate.
        Returns: The index of the specified System.CodeDom.Compiler.CompilerError in the
        System.CodeDom.Compiler.CompilerErrorCollection, if found; otherwise, -1.
        """
        pass
    def Insert(self, index, value):
        """
        Insert(self: CompilerErrorCollection, index: int, value: CompilerError)
        Inserts the specified System.CodeDom.Compiler.CompilerError into the collection
        at the specified index.
        index: The zero-based index where the compiler error should be inserted.
        value: The System.CodeDom.Compiler.CompilerError to insert.
        """
        pass
    def OnClear(self, *args): #cannot find CLR method
        """
        OnClear(self: CollectionBase)
        Performs additional custom processes when clearing the contents of the
        System.Collections.CollectionBase instance.
        """
        pass
    def OnClearComplete(self, *args): #cannot find CLR method
        """
        OnClearComplete(self: CollectionBase)
        Performs additional custom processes after clearing the contents of the
        System.Collections.CollectionBase instance.
        """
        pass
    def OnInsert(self, *args): #cannot find CLR method
        """
        OnInsert(self: CollectionBase, index: int, value: object)
        Performs additional custom processes before inserting a new element into the
        System.Collections.CollectionBase instance.
        index: The zero-based index at which to insert value.
        value: The new value of the element at index.
        """
        pass
    def OnInsertComplete(self, *args): #cannot find CLR method
        """
        OnInsertComplete(self: CollectionBase, index: int, value: object)
        Performs additional custom processes after inserting a new element into the
        System.Collections.CollectionBase instance.
        index: The zero-based index at which to insert value.
        value: The new value of the element at index.
        """
        pass
    def OnRemove(self, *args): #cannot find CLR method
        """
        OnRemove(self: CollectionBase, index: int, value: object)
        Performs additional custom processes when removing an element from the
        System.Collections.CollectionBase instance.
        index: The zero-based index at which value can be found.
        value: The value of the element to remove from index.
        """
        pass
    def OnRemoveComplete(self, *args): #cannot find CLR method
        """
        OnRemoveComplete(self: CollectionBase, index: int, value: object)
        Performs additional custom processes after removing an element from the
        System.Collections.CollectionBase instance.
        index: The zero-based index at which value can be found.
        value: The value of the element to remove from index.
        """
        pass
    def OnSet(self, *args): #cannot find CLR method
        """
        OnSet(self: CollectionBase, index: int, oldValue: object, newValue: object)
        Performs additional custom processes before setting a value in the
        System.Collections.CollectionBase instance.
        index: The zero-based index at which oldValue can be found.
        oldValue: The value to replace with newValue.
        newValue: The new value of the element at index.
        """
        pass
    def OnSetComplete(self, *args): #cannot find CLR method
        """
        OnSetComplete(self: CollectionBase, index: int, oldValue: object, newValue: object)
        Performs additional custom processes after setting a value in the
        System.Collections.CollectionBase instance.
        index: The zero-based index at which oldValue can be found.
        oldValue: The value to replace with newValue.
        newValue: The new value of the element at index.
        """
        pass
    def OnValidate(self, *args): #cannot find CLR method
        """
        OnValidate(self: CollectionBase, value: object)
        Performs additional custom processes when validating a value.
        value: The object to validate.
        """
        pass
    def Remove(self, value):
        """
        Remove(self: CompilerErrorCollection, value: CompilerError)
        Removes a specific System.CodeDom.Compiler.CompilerError from the collection.
        value: The System.CodeDom.Compiler.CompilerError to remove from the
        System.CodeDom.Compiler.CompilerErrorCollection.
        """
        pass
    def __add__(self, *args): #cannot find CLR method
        """ x.__add__(y) <==> x+y """
        pass
    def __getitem__(self, *args): #cannot find CLR method
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __init__(self, *args): #cannot find CLR method
        # NOTE: the generator emitted this signature note three times; collapsed to one.
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __iter__(self, *args): #cannot find CLR method
        """ __iter__(self: IEnumerable) -> object """
        pass
    @staticmethod # known case of __new__
    def __new__(self, value=None):
        """
        __new__(cls: type)
        __new__(cls: type, value: CompilerErrorCollection)
        __new__(cls: type, value: Array[CompilerError])
        """
        # Defaulted parameter models the three CLR constructor overloads above.
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __setitem__(self, *args): #cannot find CLR method
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    HasErrors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value that indicates whether the collection contains errors.
    Get: HasErrors(self: CompilerErrorCollection) -> bool
    """
    HasWarnings = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value that indicates whether the collection contains warnings.
    Get: HasWarnings(self: CompilerErrorCollection) -> bool
    """
    InnerList = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets an System.Collections.ArrayList containing the list of elements in the System.Collections.CollectionBase instance.
    """
    List = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets an System.Collections.IList containing the list of elements in the System.Collections.CollectionBase instance.
    """
class CompilerInfo(object):
    """ Represents the configuration settings of a language provider. This class cannot be inherited. """
    def CreateDefaultCompilerParameters(self):
        """
        CreateDefaultCompilerParameters(self: CompilerInfo) -> CompilerParameters
        Gets the configured compiler settings for the language provider implementation.
        Returns: A read-only System.CodeDom.Compiler.CompilerParameters instance that contains
        the compiler options and settings configured for the language provider.
        """
        pass
    def CreateProvider(self, providerOptions=None):
        """
        CreateProvider(self: CompilerInfo) -> CodeDomProvider
        Returns a System.CodeDom.Compiler.CodeDomProvider instance for the current
        language provider settings.
        Returns: A CodeDOM provider associated with the language provider configuration.
        CreateProvider(self: CompilerInfo, providerOptions: IDictionary[str, str]) -> CodeDomProvider
        """
        # Defaulted parameter models the two CLR overloads documented above.
        pass
    def Equals(self, o):
        """
        Equals(self: CompilerInfo, o: object) -> bool
        Determines whether the specified object represents the same language provider
        and compiler settings as the current System.CodeDom.Compiler.CompilerInfo.
        o: The object to compare with the current System.CodeDom.Compiler.CompilerInfo.
        Returns: true if o is a System.CodeDom.Compiler.CompilerInfo object and its value is the
        same as this instance; otherwise, false.
        """
        pass
    def GetExtensions(self):
        """
        GetExtensions(self: CompilerInfo) -> Array[str]
        Returns the file name extensions supported by the language provider.
        Returns: An array of file name extensions supported by the language provider.
        """
        pass
    def GetHashCode(self):
        """
        GetHashCode(self: CompilerInfo) -> int
        Returns the hash code for the current instance.
        Returns: A 32-bit signed integer hash code for the current
        System.CodeDom.Compiler.CompilerInfo instance, suitable for use in hashing
        algorithms and data structures such as a hash table.
        """
        pass
    def GetLanguages(self):
        """
        GetLanguages(self: CompilerInfo) -> Array[str]
        Gets the language names supported by the language provider.
        Returns: An array of language names supported by the language provider.
        """
        pass
    def __eq__(self, *args): #cannot find CLR method
        """ x.__eq__(y) <==> x==y """
        pass
    def __ne__(self, *args): #cannot find CLR method
        """ x.__ne__(y) <==> x!=y """
        pass
    CodeDomProviderType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the type of the configured System.CodeDom.Compiler.CodeDomProvider implementation.
    Get: CodeDomProviderType(self: CompilerInfo) -> Type
    """
    IsCodeDomProviderTypeValid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Returns a value indicating whether the language provider implementation is configured on the computer.
    Get: IsCodeDomProviderTypeValid(self: CompilerInfo) -> bool
    """
class CompilerParameters(object):
    """
    Represents the parameters used to invoke a compiler.
    CompilerParameters()
    CompilerParameters(assemblyNames: Array[str])
    CompilerParameters(assemblyNames: Array[str], outputName: str)
    CompilerParameters(assemblyNames: Array[str], outputName: str, includeDebugInformation: bool)
    """
    @staticmethod # known case of __new__
    def __new__(self, assemblyNames=None, outputName=None, includeDebugInformation=None):
        """
        __new__(cls: type)
        __new__(cls: type, assemblyNames: Array[str])
        __new__(cls: type, assemblyNames: Array[str], outputName: str)
        __new__(cls: type, assemblyNames: Array[str], outputName: str, includeDebugInformation: bool)
        """
        # Defaulted parameters model the four CLR constructor overloads above.
        pass
    CompilerOptions = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the optional additional-command line arguments string to use when invoking the compiler.
    Get: CompilerOptions(self: CompilerParameters) -> str
    Set: CompilerOptions(self: CompilerParameters) = value
    """
    CoreAssemblyFileName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Get: CoreAssemblyFileName(self: CompilerParameters) -> str
    Set: CoreAssemblyFileName(self: CompilerParameters) = value
    """
    EmbeddedResources = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the .NET Framework resource files to include when compiling the assembly output.
    Get: EmbeddedResources(self: CompilerParameters) -> StringCollection
    """
    Evidence = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Specifies an evidence object that represents the security policy permissions to grant the compiled assembly.
    Get: Evidence(self: CompilerParameters) -> Evidence
    Set: Evidence(self: CompilerParameters) = value
    """
    GenerateExecutable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether to generate an executable.
    Get: GenerateExecutable(self: CompilerParameters) -> bool
    Set: GenerateExecutable(self: CompilerParameters) = value
    """
    GenerateInMemory = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether to generate the output in memory.
    Get: GenerateInMemory(self: CompilerParameters) -> bool
    Set: GenerateInMemory(self: CompilerParameters) = value
    """
    IncludeDebugInformation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether to include debug information in the compiled executable.
    Get: IncludeDebugInformation(self: CompilerParameters) -> bool
    Set: IncludeDebugInformation(self: CompilerParameters) = value
    """
    LinkedResources = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the .NET Framework resource files that are referenced in the current source.
    Get: LinkedResources(self: CompilerParameters) -> StringCollection
    """
    MainClass = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the name of the main class.
    Get: MainClass(self: CompilerParameters) -> str
    Set: MainClass(self: CompilerParameters) = value
    """
    OutputAssembly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the name of the output assembly.
    Get: OutputAssembly(self: CompilerParameters) -> str
    Set: OutputAssembly(self: CompilerParameters) = value
    """
    ReferencedAssemblies = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the assemblies referenced by the current project.
    Get: ReferencedAssemblies(self: CompilerParameters) -> StringCollection
    """
    TempFiles = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the collection that contains the temporary files.
    Get: TempFiles(self: CompilerParameters) -> TempFileCollection
    Set: TempFiles(self: CompilerParameters) = value
    """
    TreatWarningsAsErrors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether to treat warnings as errors.
    Get: TreatWarningsAsErrors(self: CompilerParameters) -> bool
    Set: TreatWarningsAsErrors(self: CompilerParameters) = value
    """
    UserToken = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the user token to use when creating the compiler process.
    Get: UserToken(self: CompilerParameters) -> IntPtr
    Set: UserToken(self: CompilerParameters) = value
    """
    WarningLevel = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the warning level at which the compiler aborts compilation.
    Get: WarningLevel(self: CompilerParameters) -> int
    Set: WarningLevel(self: CompilerParameters) = value
    """
    Win32Resource = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the file name of a Win32 resource file to link into the compiled assembly.
    Get: Win32Resource(self: CompilerParameters) -> str
    Set: Win32Resource(self: CompilerParameters) = value
    """
class CompilerResults(object):
    """
    Represents the results of compilation that are returned from a compiler.
    CompilerResults(tempFiles: TempFileCollection)
    """
    @staticmethod # known case of __new__
    def __new__(self, tempFiles):
        """ __new__(cls: type, tempFiles: TempFileCollection) """
        # Generated stub: the concrete implementation is supplied by the CLR type.
        pass
    CompiledAssembly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the compiled assembly.
    Get: CompiledAssembly(self: CompilerResults) -> Assembly
    Set: CompiledAssembly(self: CompilerResults) = value
    """
    Errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the collection of compiler errors and warnings.
    Get: Errors(self: CompilerResults) -> CompilerErrorCollection
    """
    Evidence = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Indicates the evidence object that represents the security policy permissions of the compiled assembly.
    Get: Evidence(self: CompilerResults) -> Evidence
    Set: Evidence(self: CompilerResults) = value
    """
    NativeCompilerReturnValue = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the compiler's return value.
    Get: NativeCompilerReturnValue(self: CompilerResults) -> int
    Set: NativeCompilerReturnValue(self: CompilerResults) = value
    """
    Output = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the compiler output messages.
    Get: Output(self: CompilerResults) -> StringCollection
    """
    PathToAssembly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the path of the compiled assembly.
    Get: PathToAssembly(self: CompilerResults) -> str
    Set: PathToAssembly(self: CompilerResults) = value
    """
    TempFiles = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the temporary file collection to use.
    Get: TempFiles(self: CompilerResults) -> TempFileCollection
    Set: TempFiles(self: CompilerResults) = value
    """
# Auto-generated IronPython stub for System.CodeDom.Compiler.Executor;
# bodies are placeholders, the real implementation lives in the CLR.
class Executor(object):
    """ Provides command execution functions for invoking compilers. This class cannot be inherited. """
    @staticmethod
    def ExecWait(cmd, tempFiles):
        """
        ExecWait(cmd: str, tempFiles: TempFileCollection)
        Executes `cmd` and waits for the call to return.
        cmd: the command to execute.
        tempFiles: a System.CodeDom.Compiler.TempFileCollection managing the
            intermediate files generated during compilation.
        """
        pass
    @staticmethod
    def ExecWaitWithCapture(*__args):
        """
        All overloads return (int, str, str): the compiler's return code,
        its message output, and the error text.
        ExecWaitWithCapture(cmd: str, tempFiles: TempFileCollection, outputName: str, errorName: str) -> (int, str, str)
        ExecWaitWithCapture(cmd: str, currentDir: str, tempFiles: TempFileCollection, outputName: str, errorName: str) -> (int, str, str)
        ExecWaitWithCapture(userToken: IntPtr, cmd: str, tempFiles: TempFileCollection, outputName: str, errorName: str) -> (int, str, str)
        ExecWaitWithCapture(userToken: IntPtr, cmd: str, currentDir: str, tempFiles: TempFileCollection, outputName: str, errorName: str) -> (int, str, str)
        Executes `cmd` (optionally under `userToken` and/or starting in
        `currentDir`), waits for completion, and captures the compiler's
        output and error messages into the referenced strings.
        userToken: the token to start the compiler process with.
        cmd: the command to execute.
        currentDir: the directory to start the process in.
        tempFiles: a TempFileCollection managing intermediate compilation files.
        outputName: receives the compiler's message output.
        errorName: receives the name of the error(s) encountered.
        """
        pass
    __all__ = [
        'ExecWait',
        'ExecWaitWithCapture',
    ]
# Auto-generated IronPython stub for System.CodeDom.Compiler.GeneratedCodeAttribute.
class GeneratedCodeAttribute(Attribute, _Attribute):
    """
    Identifies code generated by a tool. This class cannot be inherited.
    GeneratedCodeAttribute(tool: str, version: str)
    """
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, tool, version):
        """ __new__(cls: type, tool: str, version: str) """
        pass
    Tool = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the name of the tool that generated the code.
    Get: Tool(self: GeneratedCodeAttribute) -> str
    """
    Version = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the version of the tool that generated the code.
    Get: Version(self: GeneratedCodeAttribute) -> str
    """
# Auto-generated IronPython stub for the .NET GeneratorSupport flags enum;
# member values are supplied by the CLR at runtime (None here in the stub).
class GeneratorSupport(Enum, IComparable, IFormattable, IConvertible):
    """
    Defines identifiers used to determine whether a code generator supports certain types of code elements.
    enum (flags) GeneratorSupport, values: ArraysOfArrays (1), AssemblyAttributes (4096), ChainedConstructorArguments (32768), ComplexExpressions (524288), DeclareDelegates (512), DeclareEnums (256), DeclareEvents (2048), DeclareIndexerProperties (33554432), DeclareInterfaces (1024), DeclareValueTypes (128), EntryPointMethod (2), GenericTypeDeclaration (16777216), GenericTypeReference (8388608), GotoStatements (4), MultidimensionalArrays (8), MultipleInterfaceMembers (131072), NestedTypes (65536), ParameterAttributes (8192), PartialTypes (4194304), PublicStaticMembers (262144), ReferenceParameters (16384), Resources (2097152), ReturnTypeAttributes (64), StaticConstructors (16), TryCatchStatements (32), Win32Resources (1048576)
    """
    def __eq__(self, *args): #cannot find CLR method
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args): #cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args): #cannot find CLR method
        pass
    def __gt__(self, *args): #cannot find CLR method
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args): #cannot find CLR method
        pass
    def __lt__(self, *args): #cannot find CLR method
        pass
    def __ne__(self, *args): #cannot find CLR method
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __str__(self, *args): #cannot find CLR method
        pass
    # Flag members (see the class docstring for the numeric values).
    ArraysOfArrays = None
    AssemblyAttributes = None
    ChainedConstructorArguments = None
    ComplexExpressions = None
    DeclareDelegates = None
    DeclareEnums = None
    DeclareEvents = None
    DeclareIndexerProperties = None
    DeclareInterfaces = None
    DeclareValueTypes = None
    EntryPointMethod = None
    GenericTypeDeclaration = None
    GenericTypeReference = None
    GotoStatements = None
    MultidimensionalArrays = None
    MultipleInterfaceMembers = None
    NestedTypes = None
    ParameterAttributes = None
    PartialTypes = None
    PublicStaticMembers = None
    ReferenceParameters = None
    Resources = None
    ReturnTypeAttributes = None
    StaticConstructors = None
    TryCatchStatements = None
    value__ = None
    Win32Resources = None
# Auto-generated IronPython stub for the ICodeParser interface.
class ICodeParser:
    """ Defines an interface for parsing code into a System.CodeDom.CodeCompileUnit. """
    def Parse(self, codeStream):
        """
        Parse(self: ICodeParser, codeStream: TextReader) -> CodeCompileUnit
        When implemented in a derived class, compiles the text read from
        `codeStream` (a System.IO.TextReader) into a CodeCompileUnit.
        Returns: a System.CodeDom.CodeCompileUnit representing the parsed code.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
# Auto-generated IronPython stub for System.CodeDom.Compiler.IndentedTextWriter.
class IndentedTextWriter(TextWriter, IDisposable):
    """
    Provides a text writer that can indent new lines by a tab string token.
    IndentedTextWriter(writer: TextWriter, tabString: str)
    IndentedTextWriter(writer: TextWriter)
    """
    def Close(self):
        """
        Close(self: IndentedTextWriter)
        Closes the document being written to.
        """
        pass
    def Dispose(self):
        """
        Dispose(self: TextWriter, disposing: bool)
        Releases the unmanaged resources used by the System.IO.TextWriter and
        optionally (disposing=True) the managed resources as well.
        """
        pass
    def Flush(self):
        """
        Flush(self: IndentedTextWriter)
        Flushes the stream.
        """
        pass
    def MemberwiseClone(self, *args): #cannot find CLR method
        """
        MemberwiseClone(self: MarshalByRefObject, cloneIdentity: bool) -> MarshalByRefObject
        MemberwiseClone(self: object) -> object
        Creates a shallow copy of the current object; `cloneIdentity` controls
        whether the MarshalByRefObject identity is copied to the clone.
        """
        pass
    def OutputTabs(self, *args): #cannot find CLR method
        """
        OutputTabs(self: IndentedTextWriter)
        Outputs the tab string once per indentation level, per the
        IndentedTextWriter.Indent property.
        """
        pass
    def Write(self, *__args):
        """
        Write(self: IndentedTextWriter, ...)
        Writes the text representation of the argument to the text stream.
        Value overloads: object, Int64, int, Single, float, bool, Char, str,
        Array[Char], and (Array[Char], index: int, count: int) for a subarray.
        Format overloads: (format: str, arg0), (format: str, arg0, arg1) and
        (format: str, *arg: Array[object]) write a formatted string using the
        same semantics as the .NET composite formatting rules.
        """
        pass
    def WriteLine(self, *__args):
        """
        WriteLine(self: IndentedTextWriter, ...)
        Same overload set as Write (plus a UInt32 overload and a no-argument
        form that writes only a line terminator); each writes its argument
        followed by a line terminator.
        """
        pass
    def WriteLineNoTabs(self, s):
        """
        WriteLineNoTabs(self: IndentedTextWriter, s: str)
        Writes the specified string to a line without tabs.
        s: The string to write.
        """
        pass
    def __enter__(self, *args): #cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): #cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, writer, tabString=None):
        """
        __new__(cls: type, writer: TextWriter)
        __new__(cls: type, writer: TextWriter, tabString: str)
        """
        pass
    Encoding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the encoding for the text writer to use.
    Get: Encoding(self: IndentedTextWriter) -> Encoding
    """
    Indent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the number of spaces to indent.
    Get: Indent(self: IndentedTextWriter) -> int
    Set: Indent(self: IndentedTextWriter) = value
    """
    InnerWriter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the System.IO.TextWriter to use.
    Get: InnerWriter(self: IndentedTextWriter) -> TextWriter
    """
    NewLine = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the new line character to use.
    Get: NewLine(self: IndentedTextWriter) -> str
    Set: NewLine(self: IndentedTextWriter) = value
    """
    CoreNewLine = None
    DefaultTabString = ' '
# Auto-generated IronPython stub for the .NET LanguageOptions flags enum.
class LanguageOptions(Enum, IComparable, IFormattable, IConvertible):
    """
    Defines identifiers that indicate special features of a language.
    enum (flags) LanguageOptions, values: CaseInsensitive (1), None (0)
    """
    def __eq__(self, *args): #cannot find CLR method
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args): #cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args): #cannot find CLR method
        pass
    def __gt__(self, *args): #cannot find CLR method
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args): #cannot find CLR method
        pass
    def __lt__(self, *args): #cannot find CLR method
        pass
    def __ne__(self, *args): #cannot find CLR method
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __str__(self, *args): #cannot find CLR method
        pass
    CaseInsensitive = None
    # NOTE(review): assigning to the name 'None' is a SyntaxError in standard
    # Python; this generated line mirrors the .NET member and only loads
    # under the stub tooling that produced it — confirm before reusing.
    None = None
    value__ = None
# Auto-generated IronPython stub for System.CodeDom.Compiler.TempFileCollection.
class TempFileCollection(object, ICollection, IEnumerable, IDisposable):
    """
    Represents a collection of temporary files.
    TempFileCollection(tempDir: str, keepFiles: bool)
    TempFileCollection()
    TempFileCollection(tempDir: str)
    """
    def AddExtension(self, fileExtension, keepFile=None):
        """
        AddExtension(self: TempFileCollection, fileExtension: str) -> str
        AddExtension(self: TempFileCollection, fileExtension: str, keepFile: bool) -> str
        Adds an auto-generated temporary file name with the given extension
        to the collection; `keepFile` (True = keep after use, False = delete)
        controls whether the file survives Delete()/disposal.
        Returns: the file name that was just added to the collection.
        """
        pass
    def AddFile(self, fileName, keepFile):
        """
        AddFile(self: TempFileCollection, fileName: str, keepFile: bool)
        Adds `fileName` to the collection; `keepFile` says whether to keep it
        after the collection is disposed or Delete() is called.
        """
        pass
    def CopyTo(self, fileNames, start):
        """
        CopyTo(self: TempFileCollection, fileNames: Array[str], start: int)
        Copies the members of the collection into the string array
        `fileNames`, beginning at index `start`.
        """
        pass
    def Delete(self):
        """
        Delete(self: TempFileCollection)
        Deletes the temporary files within this collection that were not
        marked to be kept.
        """
        pass
    def Dispose(self, *args): #cannot find CLR method
        """
        Dispose(self: TempFileCollection, disposing: bool)
        Releases the unmanaged resources used by the collection and
        optionally (disposing=True) the managed resources as well.
        """
        pass
    def GetEnumerator(self):
        """
        GetEnumerator(self: TempFileCollection) -> IEnumerator
        Gets an enumerator over the collection's members.
        """
        pass
    def __enter__(self, *args): #cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): #cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __iter__(self, *args): #cannot find CLR method
        """ __iter__(self: IEnumerable) -> object """
        pass
    def __len__(self, *args): #cannot find CLR method
        """ x.__len__() <==> len(x) """
        pass
    @staticmethod # known case of __new__
    def __new__(self, tempDir=None, keepFiles=None):
        """
        __new__(cls: type)
        __new__(cls: type, tempDir: str)
        __new__(cls: type, tempDir: str, keepFiles: bool)
        """
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __repr__(self, *args): #cannot find CLR method
        """ __repr__(self: object) -> str """
        pass
    BasePath = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the full path to the base file name, without a file name extension, on the temporary directory path, that is used to generate temporary file names for the collection.
    Get: BasePath(self: TempFileCollection) -> str
    """
    Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the number of files in the collection.
    Get: Count(self: TempFileCollection) -> int
    """
    KeepFiles = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether to keep the files, by default, when the System.CodeDom.Compiler.TempFileCollection.Delete method is called or the collection is disposed.
    Get: KeepFiles(self: TempFileCollection) -> bool
    Set: KeepFiles(self: TempFileCollection) = value
    """
    TempDir = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the temporary directory to store the temporary files in.
    Get: TempDir(self: TempFileCollection) -> str
    """
| 39.609779
| 737
| 0.61809
| 16,760
| 170,124
| 6.220406
| 0.056802
| 0.022762
| 0.035586
| 0.045754
| 0.815115
| 0.795528
| 0.768911
| 0.751981
| 0.740077
| 0.719387
| 0
| 0.001549
| 0.316734
| 170,124
| 4,294
| 738
| 39.619003
| 0.895337
| 0.040282
| 0
| 0.772674
| 0
| 0
| 0.000925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.408716
| 0.004711
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
91c2d3a8e445d75cf50b00ee9243adf64c5d6335
| 18,242
|
py
|
Python
|
main.py
|
mukcyul/testRuf
|
1322d914d875f8c7827a25da7917d41d21699031
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
mukcyul/testRuf
|
1322d914d875f8c7827a25da7917d41d21699031
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
mukcyul/testRuf
|
1322d914d875f8c7827a25da7917d41d21699031
|
[
"CC0-1.0"
] | null | null | null |
# ---- main application module ----
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.screenmanager import ScreenManager, Screen
from instructions import *
from scrollLabel import *
from seconds import *
from sits import *
from runner import *
from ruffier import *
##Hello
# Module-level state shared between the screens; filled in as the
# user walks through the test.
age=0     # participant's age, set on the first screen
name=''   # participant's name, set on the first screen
pulse1=0  # first pulse count, entered on the second screen
pulse2=0  # second pulse count, entered on the fourth screen
pulse3=0  # third pulse count (after rest), entered on the fourth screen
# First screen class: collects the user's name and age
class FirstScr(Screen):
    """First screen: shows the test instructions and collects the user's
    name and age before advancing to the measurement screens."""
    def __init__(self, name='first'):
        super().__init__(name=name)
        lay1 = BoxLayout(orientation='vertical')
        layh1 = BoxLayout(size_hint=(1, .1))
        layh2 = BoxLayout(size_hint=(1, .1))
        label_n = Label(text="Ваше имя: ")
        label_age = Label(text="Введите Ваш возраст: ")
        scl = ScrollLabel(ltext=txt_instruction, pos_hint={'center_y': .5})
        self.name_val = TextInput(text='', multiline=False, size_hint=(.75, .9), pos_hint={"top": 1, 'center_x': .5})
        self.age_val = TextInput(text='7', multiline=False, size_hint=(.75, .9), pos_hint={"top": 1, 'center_x': .5})
        self.btn = Button(text="Продолжить", size_hint=(.33, .23), pos_hint={'top': 0.5, 'center_x': .5})
        # Assigning on_press directly is the convention used throughout this file.
        self.btn.on_press = self.next
        layh1.add_widget(label_n)
        layh2.add_widget(label_age)
        layh1.add_widget(self.name_val)
        layh2.add_widget(self.age_val)
        lay1.add_widget(scl)
        lay1.add_widget(layh1)
        lay1.add_widget(layh2)
        lay1.add_widget(self.btn)
        self.add_widget(lay1)
    def next(self):
        """Store the entered name/age in the module globals and advance."""
        global age, name
        # BUGFIX: int() used to crash the app on non-numeric/empty input;
        # fall back to the module default instead.
        try:
            age = int(self.age_val.text)
        except ValueError:
            age = 0
        name = self.name_val.text
        self.manager.current = 'second'
class SecondScr(Screen):
    """Second screen: the user counts their pulse for 15 seconds (driven by
    the Seconds countdown widget) and enters the result (stored as pulse1)."""
    def __init__(self, name='second'):
        super().__init__(name=name)
        # True once the countdown has finished and the result may be entered.
        self.next_screen=False
        lay1=BoxLayout(orientation='vertical')
        lay2=BoxLayout(size_hint=(1,.1))
        scrl=ScrollLabel(txt_test1)
        label=Label(text='Введите результат:')
        # 15-second countdown; sec_finished fires when it completes.
        self.sec=Seconds(15,pos_hint={'center_x':.5})
        self.sec.bind(done=self.sec_finished)
        # The result field is locked until the countdown has run.
        self.ti=TextInput(text='0', multiline=False, size_hint=(.75,1.3), pos_hint={"top":1.3,'center_x':.5})
        self.ti.set_disabled(True)
        self.btn=Button(text="Продолжить",size_hint=(.33,.23), pos_hint={'top':0.5, 'center_x':.5})
        self.btn.on_press=self.next
        lay2.add_widget(label)
        lay2.add_widget(self.ti)
        lay1.add_widget(scrl)
        lay1.add_widget(self.sec)
        lay1.add_widget(lay2)
        lay1.add_widget(self.btn)
        self.add_widget(lay1)
    def next(self):
        # First press: start the countdown and lock the button.
        # Second press (after sec_finished): store the pulse and move on.
        if self.next_screen==False:
            self.sec.start()
            self.btn.set_disabled(True)
        else:
            global pulse1
            # NOTE(review): int() raises on non-numeric input — confirm
            # whether validation is expected here.
            pulse1=int(self.ti.text)
            self.manager.current='third'
    def sec_finished(self,*args):
        # Countdown done: unlock the input field and the button.
        self.btn.set_disabled(False)
        self.btn.text='Продолжить'
        self.ti.set_disabled(False)
        self.next_screen=True
class ThirdScr(Screen):
    """Third screen: paces the user through 30 exercise repetitions using the
    Runner widget, then allows continuing to the post-exercise measurement."""
    def __init__(self, name='third'):
        super().__init__(name=name)
        # True once the paced run has finished.
        self.next_screen=False
        instr=ScrollLabel(txt_sits,size_hint=(1,.1))
        # Repetition counter display (30 repetitions).
        self.sits_value=Sits(30,size_hint=(1,.1))
        # Pacer: 30 steps of 1.5 s each; run_finished fires at the end.
        self.run=Runner(total=30, steptime=1.5)
        self.run.bind(finished=self.run_finished)
        self.btn=Button(text='Начать', size_hint=(1,.1))
        self.btn.on_press=self.next
        lay1=BoxLayout(orientation='vertical')
        lay1.add_widget(instr)
        lay1.add_widget(self.sits_value)
        lay1.add_widget(self.run)
        lay1.add_widget(self.btn)
        self.add_widget(lay1)
    def next(self):
        # First press: start the pacer, lock the button, and advance the
        # repetition counter in step with the pacer's value.
        # Second press (after the run): go to the fourth screen.
        if self.next_screen==False:
            self.run.start()
            self.btn.set_disabled(True)
            self.run.bind(value=self.sits_value.next)
        else:
            self.manager.current='fourth'
    def run_finished(self,*args):
        # Exercise finished: unlock the button for the screen switch.
        self.next_screen=True
        self.btn.set_disabled(False)
        self.btn.text='Продолжить'
class FourthScr(Screen):
    """Fourth screen: measures the pulse right after exercise (pulse2) and
    again after a 30-second rest (pulse3), via a three-stage stopwatch."""
    def __init__(self, name='fourth'):
        super().__init__(name=name)
        # May we advance to the next screen yet?
        self.next_screen = False
        # Stopwatch stage: 0 = idle, then 1..3 for the three timed phases.
        self.stage = 0
        # Widgets.
        instr1 = ScrollLabel(txt_test3)
        self.instr2 = ScrollLabel('Считайте пульс')
        # Stopwatch; the first run counts 15 seconds.
        self.sec = Seconds(15)
        # sec_finished is called each time the stopwatch completes.
        self.sec.bind(done = self.sec_finished)
        res1_label = Label(text='Результат:')
        self.res1_value = TextInput(text='0')
        self.res1_value.set_disabled(True)
        res2_label = Label(text='Результат после отдыха:')
        self.res2_value = TextInput(text='0')
        self.res2_value.set_disabled(True)
        self.but = Button(text='Начать', size_hint=(1, 0.2))
        self.but.on_press = self.next
        # Lay the widgets out.
        hor1 = BoxLayout(size_hint=(1, 0.2))
        hor1.add_widget(res1_label)
        hor1.add_widget(self.res1_value)
        hor2 = BoxLayout(size_hint=(1, 0.2))
        hor2.add_widget(res2_label)
        hor2.add_widget(self.res2_value)
        ver = BoxLayout(orientation='vertical')
        ver.add_widget(instr1)
        ver.add_widget(self.instr2)
        ver.add_widget(self.sec)
        ver.add_widget(hor1)
        ver.add_widget(hor2)
        ver.add_widget(self.but)
        self.add_widget(ver)
    def sec_finished(self, instance, value):
        """Advance the measurement stage each time the stopwatch finishes."""
        if value:
            self.stage += 1
            if self.stage == 1:
                # First 15 s counted: rest for 30 s, unlock the first result.
                self.instr2.set_text('Отдыхайте')
                self.sec.restart(30)
                self.res1_value.set_disabled(False)
            elif self.stage == 2:
                # Rest over (30 s): count the pulse again for 15 s.
                # BUGFIX: the prompt used to read 'Считайте пульт'
                # ("count the remote") instead of 'пульс' ("pulse"),
                # cf. the identical prompt in __init__.
                self.instr2.set_text('Считайте пульс')
                self.sec.restart(15)
            elif self.stage == 3:
                # Final 15 s counted: unlock the second result and the button.
                self.res2_value.set_disabled(False)
                self.but.text = 'Завершить'
                self.but.set_disabled(False)
                self.next_screen = True
    def next(self):
        """Button handler: start the stopwatch, or store results and leave."""
        if self.next_screen == False:
            # Lock the button and start the first 15-second count.
            self.but.set_disabled(True)
            self.sec.start()
        else:
            global pulse2, pulse3
            # Guard against non-numeric input instead of crashing the app.
            try:
                pulse2 = int(self.res1_value.text)
            except ValueError:
                pulse2 = 0
            try:
                pulse3 = int(self.res2_value.text)
            except ValueError:
                pulse3 = 0
            self.manager.current = 'fiveth'
def get_result():
    """Build the result text for the final screen: the user's name followed
    by the two lines of the Ruffier test verdict (from ruffier.test)."""
    verdict = test(pulse1, pulse2, pulse3, age)
    return '\n'.join([name, verdict[0], verdict[1]])
class FivethScr(Screen):
    """Final screen: shows the computed result and offers a restart."""
    def __init__(self, name='fiveth'):
        super().__init__(name=name)
        ver = BoxLayout(orientation='vertical', padding=8, spacing=8)
        # Single label that will hold the measurement result.
        self.instr = ScrollLabel('')
        self.btn=Button(text="Заново",size_hint=(.33,.23), pos_hint={'top':0.5, 'center_x':.5})
        self.btn.on_press = self.next
        ver.add_widget(self.instr)
        ver.add_widget(self.btn)
        self.add_widget(ver)
        # Refresh the result text each time the screen is shown.
        self.on_enter = self.before
    def before(self):
        # Fill the label with the formatted result from get_result().
        self.instr.set_text(get_result())
    def next(self):
        # 'Again' button: restart from the first screen.
        self.manager.current = 'first'
class SixthScr(Screen):
    """Empty placeholder screen registered under the name 'sixth'."""
    def __init__(self, name='sixth'):
        super().__init__(name=name)
class MainScr(Screen):
    """Placeholder main screen; its registration in MyApp.build is
    currently commented out, so it is never shown."""
    def __init__(self,name='main'):
        super().__init__(name=name)
class MyApp(App):
    """Application root: wires all test screens into a ScreenManager."""
    def build(self):
        manager = ScreenManager()
        # Screens in presentation order; each is registered under its name.
        screens = (
            (FirstScr, 'first'),
            (SecondScr, 'second'),
            (ThirdScr, 'third'),
            (FourthScr, 'fourth'),
            (FivethScr, 'fiveth'),
            (SixthScr, 'sixth'),
        )
        for screen_cls, screen_name in screens:
            manager.add_widget(screen_cls(name=screen_name))
        return manager
# Start the app only when executed as a script, not when imported.
if __name__ == '__main__':
    MyApp().run()
=======
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.screenmanager import ScreenManager, Screen
##hi
######################### 123
main
from instructions import *
from scrollLabel import *
from seconds import *
from sits import *
from runner import *
from ruffier import *
##Hello
age=0
name=''
pulse1=0
pulse2=0
pulse3=0
#класс Первого экрана
class FirstScr(Screen):
    """First screen: shows the test instructions and collects name and age."""

    def __init__(self, name='first'):
        super().__init__(name=name)
        lay1 = BoxLayout(orientation='vertical')
        layh1 = BoxLayout(size_hint=(1, .1))
        layh2 = BoxLayout(size_hint=(1, .1))
        label_n = Label(text="Ваше имя: ")
        label_age = Label(text="Введите Ваш возраст: ")
        scl = ScrollLabel(ltext=txt_instruction, pos_hint={'center_y': .5})
        self.name_val = TextInput(text='', multiline=False, size_hint=(.75, .9), pos_hint={"top": 1, 'center_x': .5})
        self.age_val = TextInput(text='7', multiline=False, size_hint=(.75, .9), pos_hint={"top": 1, 'center_x': .5})
        self.btn = Button(text="Продолжить", size_hint=(.33, .23), pos_hint={'top': 0.5, 'center_x': .5})
        self.btn.on_press = self.next
        layh1.add_widget(label_n)
        layh2.add_widget(label_age)
        layh1.add_widget(self.name_val)
        layh2.add_widget(self.age_val)
        lay1.add_widget(scl)
        lay1.add_widget(layh1)
        lay1.add_widget(layh2)
        lay1.add_widget(self.btn)
        self.add_widget(lay1)

    def next(self):
        """Store the entered name/age in the module globals, then go to 'second'."""
        global age, name
        # FIX: int() raised an unhandled ValueError when the age field was
        # cleared or non-numeric; fall back to the field's default (7) instead.
        try:
            age = int(self.age_val.text)
        except ValueError:
            age = 7
        name = self.name_val.text
        self.manager.current = 'second'
class SecondScr(Screen):
    """Second screen: a 15-second countdown, then the user enters a pulse count."""

    def __init__(self, name='second'):
        super().__init__(name=name)
        # flag: allowed to move to the next screen only after the countdown
        self.next_screen = False
        lay1 = BoxLayout(orientation='vertical')
        lay2 = BoxLayout(size_hint=(1, .1))
        scrl = ScrollLabel(txt_test1)
        label = Label(text='Введите результат:')
        self.sec = Seconds(15, pos_hint={'center_x': .5})
        # call sec_finished when the countdown completes
        self.sec.bind(done=self.sec_finished)
        self.ti = TextInput(text='0', multiline=False, size_hint=(.75, 1.3), pos_hint={"top": 1.3, 'center_x': .5})
        self.ti.set_disabled(True)
        self.btn = Button(text="Продолжить", size_hint=(.33, .23), pos_hint={'top': 0.5, 'center_x': .5})
        self.btn.on_press = self.next
        lay2.add_widget(label)
        lay2.add_widget(self.ti)
        lay1.add_widget(scrl)
        lay1.add_widget(self.sec)
        lay1.add_widget(lay2)
        lay1.add_widget(self.btn)
        self.add_widget(lay1)

    def next(self):
        """First press starts the countdown; second press stores pulse1 and advances."""
        if self.next_screen == False:
            self.sec.start()
            self.btn.set_disabled(True)
        else:
            global pulse1
            # FIX: int() raised an unhandled ValueError on empty/non-numeric
            # input; fall back to 0 (the field's initial value) instead.
            try:
                pulse1 = int(self.ti.text)
            except ValueError:
                pulse1 = 0
            self.manager.current = 'third'

    def sec_finished(self, *args):
        """Unlock the input field and the button once the countdown finishes."""
        self.btn.set_disabled(False)
        self.btn.text = 'Продолжить'
        self.ti.set_disabled(False)
        self.next_screen = True
class ThirdScr(Screen):
    """Third screen: squats paced by the Runner widget, counted by Sits."""

    def __init__(self, name='third'):
        super().__init__(name=name)
        self.next_screen = False
        instr = ScrollLabel(txt_sits, size_hint=(1, .1))
        self.sits_value = Sits(30, size_hint=(1, .1))
        self.run = Runner(total=30, steptime=1.5)
        # re-enable the button once the runner completes all steps
        self.run.bind(finished=self.run_finished)
        self.btn = Button(text='Начать', size_hint=(1, .1))
        self.btn.on_press = self.next
        column = BoxLayout(orientation='vertical')
        for widget in (instr, self.sits_value, self.run, self.btn):
            column.add_widget(widget)
        self.add_widget(column)

    def next(self):
        """First press starts the paced squats; after they finish, go to 'fourth'."""
        if not self.next_screen:
            self.run.start()
            self.btn.set_disabled(True)
            # each runner tick advances the squat counter
            self.run.bind(value=self.sits_value.next)
        else:
            self.manager.current = 'fourth'

    def run_finished(self, *args):
        """Unlock the button and allow moving on once all squats are done."""
        self.next_screen = True
        self.btn.set_disabled(False)
        self.btn.text = 'Продолжить'
class FourthScr(Screen):
    """Fourth screen: timed pulse measurements driven by a staged stopwatch.

    Stage machine (advanced in sec_finished):
      stage 1 -- first 15 s elapsed: unlock result 1, rest for 30 s;
      stage 2 -- rest elapsed: count the pulse again for 15 s;
      stage 3 -- second count elapsed: unlock result 2 and the finish button.
    """

    def __init__(self, name='fourth'):
        super().__init__(name=name)
        # flag: allowed to move to the next screen
        self.next_screen = False
        # stopwatch stage (see class docstring)
        self.stage = 0
        # create the widgets
        instr1 = ScrollLabel(txt_test3)
        self.instr2 = ScrollLabel('Считайте пульс')
        # stopwatch (first run counts 15 seconds)
        self.sec = Seconds(15)
        # call sec_finished when the stopwatch completes a stage
        self.sec.bind(done = self.sec_finished)
        res1_label = Label(text='Результат:')
        self.res1_value = TextInput(text='0')
        self.res1_value.set_disabled(True)
        res2_label = Label(text='Результат после отдыха:')
        self.res2_value = TextInput(text='0')
        self.res2_value.set_disabled(True)
        self.but = Button(text='Начать', size_hint=(1, 0.2))
        self.but.on_press = self.next
        # lay the widgets out
        hor1 = BoxLayout(size_hint=(1, 0.2))
        hor1.add_widget(res1_label)
        hor1.add_widget(self.res1_value)
        hor2 = BoxLayout(size_hint=(1, 0.2))
        hor2.add_widget(res2_label)
        hor2.add_widget(self.res2_value)
        ver = BoxLayout(orientation='vertical')
        ver.add_widget(instr1)
        ver.add_widget(self.instr2)
        ver.add_widget(self.sec)
        ver.add_widget(hor1)
        ver.add_widget(hor2)
        ver.add_widget(self.but)
        self.add_widget(ver)

    def sec_finished(self, instance, value):
        """Advance the measurement stage each time the stopwatch completes."""
        if value:
            self.stage += 1
            if self.stage == 1:
                # first 15 s counted: rest for 30 s
                self.instr2.set_text('Отдыхайте')
                self.sec.restart(30)
                # unlock res1_value so the user can type the first count
                self.res1_value.set_disabled(False)
            elif self.stage == 2:
                # rest over: count the pulse for another 15 s
                # FIX: was 'Считайте пульт' ("count the remote") -- typo for
                # 'пульс' (pulse), inconsistent with the initial label text.
                self.instr2.set_text('Считайте пульс')
                self.sec.restart(15)
            elif self.stage == 3:
                # final count done: unlock the second result field
                self.res2_value.set_disabled(False)
                self.but.text = 'Завершить'
                self.but.set_disabled(False)
                # allow moving to the next screen
                self.next_screen = True

    def next(self):
        """Start the stopwatch, or store both pulse results and advance."""
        if self.next_screen == False:
            # lock the button while the stopwatch runs
            self.but.set_disabled(True)
            self.sec.start()
        else:
            global pulse2, pulse3
            # FIX: int() raised an unhandled ValueError on empty/non-numeric
            # input; fall back to 0 (the fields' initial value) instead.
            try:
                pulse2 = int(self.res1_value.text)
            except ValueError:
                pulse2 = 0
            try:
                pulse3 = int(self.res2_value.text)
            except ValueError:
                pulse3 = 0
            self.manager.current = 'fiveth'
# функция для формирования строки с результатом
def get_result():
    """Build the multi-line result string: the user's name plus the two
    verdict lines returned by ruffier.test()."""
    verdict = test(pulse1, pulse2, pulse3, age)
    return '\n'.join([name, verdict[0], verdict[1]])
class FivethScr(Screen):
    """Result screen: shows the computed verdict and offers a restart."""

    def __init__(self, name='fiveth'):
        super().__init__(name=name)
        column = BoxLayout(orientation='vertical', padding=8, spacing=8)
        # single scrollable widget that will hold the measurement verdict
        self.instr = ScrollLabel('')
        self.btn = Button(text="Заново", size_hint=(.33, .23), pos_hint={'top': 0.5, 'center_x': .5})
        self.btn.on_press = self.next
        column.add_widget(self.instr)
        column.add_widget(self.btn)
        self.add_widget(column)
        # refresh the text every time this screen is entered
        self.on_enter = self.before

    def before(self):
        """Fill the label with the freshly computed result string."""
        self.instr.set_text(get_result())

    def next(self):
        """Restart the whole test from the first screen."""
        self.manager.current = 'first'
class SixthScr(Screen):
    """Placeholder screen named 'sixth'; holds no widgets yet."""

    def __init__(self, name='sixth'):
        super().__init__(name=name)
class MainScr(Screen):
    """Placeholder screen named 'main'; currently unused (see the commented-out add_widget call in MyApp.build)."""

    def __init__(self, name='main'):
        super().__init__(name=name)
class MyApp(App):
    """Kivy application: wires every screen into one ScreenManager."""

    def build(self):
        manager = ScreenManager()
        # manager.add_widget(MainScr(name='main'))  # 'main' screen intentionally left out of the flow
        for screen in (FirstScr(name='first'),
                       SecondScr(name='second'),
                       ThirdScr(name='third'),
                       FourthScr(name='fourth'),
                       FivethScr(name='fiveth'),
                       SixthScr(name='sixth')):
            manager.add_widget(screen)
        return manager

MyApp().run()
#######main
| 33.47156
| 111
| 0.613803
| 2,333
| 18,242
| 4.626232
| 0.099014
| 0.070045
| 0.038544
| 0.022051
| 0.998425
| 0.998425
| 0.998425
| 0.998425
| 0.998425
| 0.998425
| 0
| 0.027282
| 0.266583
| 18,242
| 544
| 112
| 33.533088
| 0.77943
| 0.129043
| 0
| 0.994924
| 0
| 0
| 0.052488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.06599
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
91e622b4fe48c9d86042f7ce1ec38d4a54688749
| 93,220
|
py
|
Python
|
state_scrapper/testCode/testConn.py
|
nikmend/state-scrapper
|
39c902320ea97605857ef74789e578dbdb7ccfd0
|
[
"CC0-1.0"
] | null | null | null |
state_scrapper/testCode/testConn.py
|
nikmend/state-scrapper
|
39c902320ea97605857ef74789e578dbdb7ccfd0
|
[
"CC0-1.0"
] | null | null | null |
state_scrapper/testCode/testConn.py
|
nikmend/state-scrapper
|
39c902320ea97605857ef74789e578dbdb7ccfd0
|
[
"CC0-1.0"
] | null | null | null |
import mysql.connector
# SECURITY NOTE(review): database credentials -- including the password -- are
# hard-coded in plain source and have effectively been published with this file.
# Rotate this password and load credentials from environment variables or an
# untracked config file instead of literals.
mydb = mysql.connector.connect(
database='mobicrol_DB',
host="mobicrol.heliohost.org",
user="mobicrol_admin",
password="X->e^QW%K{|v12~#"
)
# Cursor used for the INSERT statements assembled later in this file.
mycursor = mydb.cursor()
#mycursor.execute("Select * from states")
#myresult = mycursor.fetchall()
myli = [
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '5434-C0003-08', 'Proyecto LUMINIUM, Apartamentos nuevos en Rincon del chico, Bogotxc3xa1 D.C. - Id 003', 'Exclusivo proyecto con excelente ubicacixc3xb3n, 70 apartamentos en 8 pisos con las mejores especificaciones (ventanerxc3xada acxc3xbastica en habitaciones). Lobby tipo hotel, Salxc3xb3n social, Lockers para las empleadas del servicio domxc3xa9stico con baxc3xb1o, Business center, Oficina de administracixc3xb3n y Terraza comunal con Gimnasio, pista de trote, parque infantil, spot de mascotas y bbq. Parqueadero de visitantes, bicicletero, planta elxc3xa9ctrica de suplencia total y sistema de captacixc3xb3n de aguas pluviales. 22 meses para pago de cuota inicial', '392564000', 'https://www.metrocuadrado.com/proyecto/luminium/5434-C0003', 'Apartamento', 'Bogotxc3xa1 D.C.', 'CHICO SAN PATRICIO', '4.68919', '-74.04276', 'Calle 106 13-28', '110111', '52.73', '47.58', '2020-08-08', '5', 'NULL', 'En Construccion', '2', '2', '1', '{\"CODIGO_WEB:\": \"5434-C0003\", \"NOMBRE_COMUN_DEL_BARRIO\": \"CHICO SAN PATRICIO\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"RINCON DEL CHICO\", \"ESTRATO\": \"5\", \"VALOR_DESDE/HASTA\": \"Desde $347.108.000 hasta $625.004.000\", \"AREA_CONSTRUIDA_DESDE/HASTA\": \"Desde 38.99 hasta 79.63\", \"AREA_PRIVADA_DESDE/HASTA\": \"Desde 35.13 hasta 86.07\", \"TIEMPO_DE_CONSTRUIDO\": \"En Construccion\", \"ETAPA_DE_CONSTRUCCION TIEMPO_DE_CONSTRUIDO\": \"En construccion\", \"MONEDA\": \"Peso colombiano\", \"VIGILANCIA\": \"24hrs\", \"FECHA_ESTIMADA_DE_ENTREGA\": \"04-2022\", \"PARQUEADERO_DE_VISITANTES\": \"Si\"}', 'Consucasa SAS', 'Calle 119 # 72 a - 26', '3212686024/ (1) 7430066 Ext: 531/ 3213137285', '573213137285' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '681-446', 'Venta de Apartamento en Santa monica - Bogotxc3xa1 D.C. - 681-446', 'Apartamento ubicado en exclusivo y central sector, habitaciones amplias para la familia, inmueble en buen estado, buenas vxc3xadas de acceso', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-sabanalarga-3-habitaciones-3-banos-2-garajes/681-446', 'Apartamento', '930000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'Santa Monica', '4.707255893469122', '-74.04345375514912', 'Calle 127ABISA 15-58', '110121', '180.00', '180.00', '2020-08-08', '6', 'NULL', 'Entre 10 y 20 anos', '3', '3', '2', '{\"CODIGO_WEB\": \"681-446\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Santa Monica\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SABANALARGA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$930.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$690.600\", \"AREA_CONSTRUIDA\": \"180.00 m\", \"AREA_PRIVADA\": \"180.00 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"3\", \"PARQUEADERO\": \"2\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"CUARTO_DE_SERVICIO\": \"Si\", \"TIPO_DE_CORTINAS\": \"Persianas\", \"complementsAndFinishes\": [\"Con chimenea\", \"Gimnasio\", \"Salon comunal\", \"Zona para ninos\"]}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '681-432', 'Venta y arriendo de Bodega en Prado veraniego - Bogotxc3xa1 D.C. - 681-432', 'Arriendo o vendo bodega situada en el barrio prado veraniego, de tres pisos, tres baxc3xb1os , puntos de luz trifxc3xa1sica y monofxc3xa1sica doscientos veinte y ciento diez v ,carga de energxc3xada treintakw con trifxc3xa1sica , la placa de la bodega soporta un peso de dos.cero klg por metro cuadrado en el primer piso, en los pisos dos y tres el peso a soportar es de doscientoskg por metro cuadrado.', 'https://www.metrocuadrado.com/inmueble/venta-bodega-bogota-prado-veraniego-norte/681-432', 'Bodega', '2000000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'PRADO VERANIEGO', '4.718366979453166', '-74.06691208047147', 'Carrera 57A 129B-1', '111111', '390.00', '390.00', '2020-08-08', '3', 'NULL', 'Entre 0 y 5 anos', '', '', '', '{\"CODIGO_WEB\": \"681-432\", \"NOMBRE_COMUN_DEL_BARRIO\": \"PRADO VERANIEGO\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"PRADO VERANIEGO NORTE\", \"ESTRATO\": \"3\", \"VALOR_DE_ARRIENDO\": \"$14.000.000\", \"VALOR_DE_VENTA\": \"$2.000.000.000\", \"AREA_CONSTRUIDA\": \"390.00 m\", \"AREA_PRIVADA\": \"390.00 m\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"TIPO_BODEGA\": \"Otro\", \"complementsAndFinishes\": []}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '681-429', 'Venta de Apartamento en Chico alto - Bogotxc3xa1 D.C. - 681-429', 'Hermoso apartamento, ubicado en exclusivo sector de la ciudad. Duplex, espectacular zona social, tres alcobas, dos de ellas con walking closet y baxc3xb1o. Todas las xc3xa1reas del apartamento tienen acceso a las terrazas.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-bosque-de-bellavista-3-habitaciones-5-banos-2-garajes/681-429', 'Apartamento', '3200000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'Chico Alto', '4.674491536261334', '-74.02694634878162', 'Carrera 1G Este 101B-0', '110111', '374.00', '305.00', '2020-08-08', '6', 'NULL', 'Entre 0 y 5 anos', '5', '5', '2', '{\"CODIGO_WEB\": \"681-429\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Chico Alto\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"BOSQUE DE BELLAVISTA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$3.200.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$3.128.000\", \"AREA_CONSTRUIDA\": \"374.00 m\", \"AREA_PRIVADA\": \"305.00 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"5 o mas\", \"PARQUEADERO\": \"2\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TIPO_COMEDOR\": \"Comedor Independiente\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"VIGILANCIA\": \"24hrs\", \"complementsAndFinishes\": []}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '681-M2261735', 'Venta de Casa en Gratamira - Bogotxc3xa1 D.C. - 681-M2261735', 'Hermosa casa, en conjunto cerrado con cancha de fxc3xbatbol, cancha de tenis, parque, senderos peatonales, sector tranquilo y exclusivo, total de casas 23 . La casa cuenta con 4 habitaciones, estudio, alcoba y baxc3xb1o de servicio, zona de bbq , todos los pisos son en madera natural. Vxc3xadas de acceso por la avenida Boyacxc3xa1, la Suba, Calle 138, 134. Muy cerca a importantes centros comerciales, Parque de la Colina, Porto Alegre y Bulevar Niza.', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-iberia-4-habitaciones-4-banos-3-garajes/681-M2261735', 'Casa', '950000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'GRATAMIRA', '4.727828502655029', '-74.07007598876953', 'Avenida Boyacxc3xa1 134-0', '111121', '220', '201', '2020-08-08', '5', '2', 'Entre 10 y 20 anos', '4', '4', '3', '{\"CODIGO_WEB\": \"681-M2261735\", \"NOMBRE_COMUN_DEL_BARRIO\": \"GRATAMIRA\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"IBERIA\", \"ESTRATO\": \"5\", \"VALOR_DE_VENTA\": \"$950.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$792.000\", \"AREA_CONSTRUIDA\": \"220 m\", \"AREA_PRIVADA\": \"201 m\", \"HABITACIONES\": \"4\", \"BANOS\": \"4\", \"PARQUEADERO\": \"3\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"DEPOSITOS\": \"1\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"TIPO_DE_CASA\": \"Condominio\", \"NUMERO_DE_NIVELES\": \"2\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Balcon\", \"AREA_TERRAZA/BALCON\": \"6\", \"TIPO_DE_COCINA\": \"Lineal\", \"TIPO_COMEDOR\": \"Comedor Independiente\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", 
\"TIPO_DE_CALENTADOR\": \"Gas\", \"CUARTO_DE_SERVICIO\": \"Si\", \"VISTA\": \"Interior\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Madera\", \"TIPO_DE_PISO_EN_COMEDOR\": \"Madera\", \"TIPO_DE_PISO_EN_SALA\": \"Madera\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"complementsAndFinishes\": [\"Comedor auxiliar\", \"Vista panoramica\", \"Cancha(s) de Tennis\", \"Cancha(s) de Futbol\", \"Zonas verdes\", \"Sendero peatonal\", \"Jardin\", \"Apto para ninos\", \"Circuito cerrado de TV\", \"Citofonos\"]}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ (1) 6204300/ olga lucia rueda salazar/ 3124332588/ AV 15 # 124-65/ Agente Inmobiliario:/ 3112514804/ PUNTO INMOBILIARIO - B.M. S.A.S', '573112514804' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( 'MC2632790', 'Venta de Apartamento en Cedritos - Bogotxc3xa1 D.C. - MC2632790', 'REMODELADO, Hermoso apartamento, ubicado en Cedritos, zona de altxc3xadsima valorizacixc3xb3n y rodeado de importantes avdas como: Novena, Cra Quince, Calle Ciento cuarenta. Encontramos grandes almacenes como Exito, Olxc3xadmpica, Carulla, Home Center y Centros Ciales como Palatino y Cedritos. El apartamento estxc3xa1 ubicado en cuarto piso con ascensor, parqueadero. Su distribucixc3xb3n le permite tener excelente luz natural, habitaciones muy amplias. Cocina Integral cerrada muy amplia e iluminada. Pisos en PVC.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-cedro-narvaez-3-habitaciones-2-banos-1-garajes/MC2632790', 'Apartamento', '340000000', 'Elvira Saby Thais', 'Bogotxc3xa1 D.C.', 'Cedritos', '4.7258325', '-74.03971', 'Calle 145 13A-58', '110121', '78.27', '78.27', '2020-08-08', 'NULL', 'NULL', 'Entre 0 y 5 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"MC2632790\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Cedritos\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CEDRO NARVAEZ\", \"VALOR_DE_VENTA\": \"$340.000.000\", \"NUMERO_DE_PISO\": \"4\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"AREA_CONSTRUIDA\": \"78.27 m\", \"AREA_PRIVADA\": \"78.27 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"CARACTERISTICAS_DEL_PARQUEADERO\": \"Descubierto\", \"complementsAndFinishes\": [\"Cocina integral\", \"Citofonos\", \"Con chimenea\", \"Vigilancia\", \"Ascensor\", \"Conjunto cerrado\", \"Parqueadero visitantes\", \"Porteria\", \"Zona para ninos\", \"Zonas verdes\", \"Cerca Centros Comerciales\", \"Cerca Colegios / Universidades\", \"Cerca 
Parques\", \"Cerca Supermercados\", \"Cerca Transporte Publico\", \"Zona Residencial\"]}', 'NULL', 'Elvira Saby Thais:/ 8161146/ Elvira Saby Thais/ 3138161146', '573138161146' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( 'MC1813654', 'Venta de Apartamento en Gran granada - Bogotxc3xa1 D.C. - MC1813654', 'Espectacular Vista... Zona de alta valorizacixc3xb3n, con salida directa a la Calle 80, cerca a comercio, a industria y a colegios Cota Siberia Tenjo ', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-centro-engativa-3-habitaciones-2-banos-1-garajes/MC1813654', 'Apartamento', '240000000', 'Diana Marcela Castro', 'Bogotxc3xa1 D.C.', 'gran granada', '4.719544', '-74.13024749', 'Calle 74A 116C-60', '111031', '60', '60', '2020-08-08', 'NULL', 'NULL', 'Entre 0 y 5 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"MC1813654\", \"NOMBRE_COMUN_DEL_BARRIO\": \"gran granada\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CENTRO ENGATIVA\", \"VALOR_DE_VENTA\": \"$240.000.000\", \"NUMERO_DE_PISO\": \"9\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"AREA_CONSTRUIDA\": \"60 m\", \"AREA_PRIVADA\": \"60 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"CARACTERISTICAS_DEL_PARQUEADERO\": \"Cubierto\", \"complementsAndFinishes\": [\"Cocina integral\", \"Bano Auxiliar\", \"Citofonos\", \"Cocina Tipo Americano\", \"Vigilancia\", \"Equipado / Amoblado\", \"Piso en madera\", \"Zona de lavanderia\", \"Ascensor\", \"Conjunto cerrado\", \"Parqueadero visitantes\", \"Porteria\", \"Salon comunal\", \"Zona para ninos\", \"Zonas verdes\", \"Cerca Centros Comerciales\", \"Cerca Colegios / Universidades\", \"Cerca Parques\", \"Cerca Supermercados\", \"Cerca Transporte Publico\", \"Ubicado en Edificio\", \"Zona Residencial\"]}', 'NULL', 'Diana Marcela Castro/ Diana Marcela Castro:/ 3203399370/ 3015965448', '573203399370' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( 'MC2415768', 'Venta de Apartamento en Mazuren - Bogotxc3xa1 D.C. - MC2415768', 'Apto en venta directa, Primer Piso, remodelado, 3 habitaciones, 1 baxc3xb1o, Cocina integral, patio de Ropas, Sala Comedor que Incluye mueble funcional para el living, en conjunto residencial cerrado que cuenta con Salxc3xb3n Comunal, gym, 2 parques infantiles, estrato 4, a 5 minutos del centro comercial parque la colina, xc3xa9xito de la Colina, Clxc3xadnica la Colina, Parqueadero Comunal', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-gilmar-3-habitaciones-1-banos-1-garajes/MC2415768', 'Apartamento', '210000000', 'Yulieth Yulieth', 'Bogotxc3xa1 D.C.', 'mazuren', '4.743928', '-74.06217', 'Carrera 59 160-46', '111156', '49', '46.30', '2020-08-08', 'NULL', 'NULL', 'Entre 5 y 10 anos', '1', '1', '1', '{\"CODIGO_WEB\": \"MC2415768\", \"NOMBRE_COMUN_DEL_BARRIO\": \"mazuren\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"GILMAR\", \"VALOR_DE_VENTA\": \"$210.000.000\", \"NUMERO_DE_PISO\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"AREA_CONSTRUIDA\": \"49 m\", \"AREA_PRIVADA\": \"46.30 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"1\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Comunal\", \"CARACTERISTICAS_DEL_PARQUEADERO\": \"Cubierto\", \"complementsAndFinishes\": [\"Cocina integral\", \"Vigilancia\", \"Piso en madera\", \"Zona de lavanderia\", \"Conjunto cerrado\", \"Parqueadero visitantes\", \"Porteria\", \"Salon comunal\", \"Zona para ninos\", \"Cerca Centros Comerciales\", \"Cerca Colegios / Universidades\", \"Cerca Parques\", \"Cerca Supermercados\", \"Cerca Transporte Publico\", \"Zona Comercial\", \"Zona Residencial\"]}', 'NULL', 'Yulieth Yulieth/ 3103210391/ 
Yulieth Yulieth:', 'NULL' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '681-457', 'Venta de Apartamento en Colina campestre - Bogotxc3xa1 D.C. - 681-457', 'Apartamento en excelente estado en segundo piso,ubicado en tranquilo y central sector cuenta con tres habitaciones cada uno con sus baxc3xb1os, dos parqueaderos, cuenta con salones de estudio.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-santa-helena-i-3-habitaciones-3-banos-2-garajes/681-457', 'Apartamento', '600000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'COLINA CAMPESTRE', '4.733185967592347', '-74.0627852136668', 'Avenida Carrera 58 146A-0', '111156', '111.00', '111.00', '2020-08-08', '5', 'NULL', 'Entre 10 y 20 anos', '3', '3', '2', '{\"CODIGO_WEB\": \"681-457\", \"NOMBRE_COMUN_DEL_BARRIO\": \"COLINA CAMPESTRE\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SANTA HELENA I\", \"ESTRATO\": \"5\", \"VALOR_DE_VENTA\": \"$600.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$350.000\", \"AREA_CONSTRUIDA\": \"111.00 m\", \"AREA_PRIVADA\": \"111.00 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"3\", \"PARQUEADERO\": \"2\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"complementsAndFinishes\": []}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' )",
#"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '1821-C0006-07', 'Proyecto Vixc3xba Park 118, Apartamentos nuevos en Santa barbara occidental, Bogotxc3xa1 D.C. - Id 006', 'El proyecto inmobiliario de vivienda Vixc3xba Park 118 se encontrarxc3xa1 localizado en la Calle 116 # 18 B 08, en el sector de Santa Bxc3xa1rbara Occidental, una de las zonas de mayor desarrollo inmobiliario del norte de Bogotxc3xa1, Se desarrolla en una torre residencial de ocho (08) pisos sobre la Calle 116 y de seis (6) pisos sobre la Calle 118. El Proyecto contarxc3xa1 con un moderno lobby tipo hotel, un (1) salxc3xb3n comunal, un (1) gimnasio, un (1) salxc3xb3n de juegos, una (1) oficina para la administracixc3xb3n, perrera con capacidad para dos (2) perros, planta elxc3xa9ctrica de suplencia parcial, dos (2) ascensores de xc3xbaltima tecnologxc3xada, dos (2) escaleras de evacuacixc3xb3n cerradas y cubierta comunal con BBQ, con xc3xa1reas de recreacixc3xb3n activa y pasiva.', '567285461', 'https://www.metrocuadrado.com/proyecto/viu-park-118/1821-C0006', 'Apartamento', 'Bogotxc3xa1 D.C.', 'SANTA BARBARA OCCIDENTAL', '4.699173', '-74.047585', 'Carrera 18 118-37', '110111', '64.55', '53.67', '2020-08-08', '5', 'NULL', 'Sobre Plano', '2', '2', '1', '{\"CODIGO_WEB:\": \"1821-C0006\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SANTA BARBARA OCCIDENTAL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SANTA BARBARA OCCIDENTAL\", \"ESTRATO\": \"5\", \"VALOR_DESDE/HASTA\": \"Desde $435.862.256 hasta $617.143.808\", \"AREA_CONSTRUIDA_DESDE/HASTA\": \"Desde 55.33 hasta 69.8\", \"AREA_PRIVADA_DESDE/HASTA\": \"Desde 48.65 hasta 63.94\", \"TIEMPO_DE_CONSTRUIDO\": \"Sobre Plano\", \"ETAPA_DE_CONSTRUCCION/TIEMPO_DE_CONSTRUIDO\": \"En construccion\", \"MONEDA\": \"Peso colombiano\", 
\"VIGILANCIA\": \"24hrs\", \"FECHA_ESTIMADA_DE_ENTREGA\": \"04-2022\", \"PARQUEADERO_DE_VISITANTES\": \"Si\", \"complementsAndFinishes\": []}', 'GRADECO CONSTRUCCIONES Y CIA S.A.', 'AVENIDA CARRERA 19 # 120 - 71', '(1) 7424250 Ext: 116/ 3023336520', '573023336520' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '5175-326', 'Venta y arriendo de Casa en Altos de provenza - Bogotxc3xa1 D.C. - 5175-326', 'Espectacular casa, dos pisos, amplios espacios, iluminada. El segundo piso tiene un xc3xa1rea social amplia, una segunda cocina, balcones, chimenea, un estudio y un cuarto con baxc3xb1o. Mayor informacixc3xb3n www.brikss.com.', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-niza-suba-4-habitaciones-5-banos-4-garajes/5175-326', 'Casa', '3500000000', 'BRIKSS SAS', 'Bogotxc3xa1 D.C.', 'ALTOS DE PROVENZA', '4.72816722', '-74.07561954', 'Carrera 76A 135-35', '111121', '740', '0', '2020-08-08', '6', 'NULL', 'Entre 10 y 20 anos', '5', '5', '4', '{\"CODIGO_WEB\": \"5175-326\", \"NOMBRE_COMUN_DEL_BARRIO\": \"ALTOS DE PROVENZA\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"NIZA SUBA\", \"ESTRATO\": \"6\", \"VALOR_DE_ARRIENDO\": \"$12.000.000\", \"VALOR_DE_VENTA\": \"$3.500.000.000\", \"AREA_CONSTRUIDA\": \"740 m\", \"AREA_DEL_LOTE\": \"740 m\", \"HABITACIONES\": \"4\", \"BANOS\": \"5 o mas\", \"PARQUEADERO\": \"4 o mas\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"TIPO_DE_CASA\": \"Tradicional\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Terraza\", \"CUARTO_DE_SERVICIO\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Otro\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Otro\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Otro\", \"VIGILANCIA\": \"24hrs\", \"complementsAndFinishes\": [\"Hall de Alcobas\", \"Puertas de seguridad\", \"Citofonos\"]}', 'CL 93B # 13 - 14 OF 307', 'CL 93B # 13 - 14 OF 307/ 4661864/ BRIKSS SAS - Principal:/ 3183757239/ BRIKSS SAS - Principal', '573183757239' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '2120-904489', 'Venta de Apartamento en Cerros de los alpes - Bogotxc3xa1 D.C. - 2120-904489', 'Espectacular apto en cerros de los alpes de 326 mas 26 de terraza, piso , ascensor directo, vista a todo Bogota, esquinero, exterior, cuartos cada uno con bano, estudio, estar de tv, parqueaderos independientes. Club house y zonas verdes', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-cerros-de-santa-barbara-4-habitaciones-5-banos-3-garajes/2120-904489', 'Apartamento', '2600000000', 'Fonnegra Fonnegra y Cia Ltda', 'Bogotxc3xa1 D.C.', 'CERROS DE LOS ALPES', '4.696831226348877', '-74.0276107788086', 'Carrera 3B 121-49', '110111', '326', '326', '2020-08-08', '6', 'NULL', 'Entre 5 y 10 anos', '5', '5', '3', '{\"CODIGO_WEB\": \"2120-904489\", \"NOMBRE_COMUN_DEL_BARRIO\": \"CERROS DE LOS ALPES\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CERROS DE SANTA BARBARA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$2.600.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$1.000.000\", \"AREA_CONSTRUIDA\": \"326 m\", \"AREA_PRIVADA\": \"326 m\", \"HABITACIONES\": \"4\", \"BANOS\": \"5 o mas\", \"PARQUEADERO\": \"3\", \"TIPO_DE_PARQUEADERO\": \"Independiente\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Balcon\", \"AREA_TERRAZA/BALCON\": \"26\", \"CUARTO_DE_SERVICIO\": \"Si\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"NUMERO_DE_CLOSETS\": \"4\", \"TIPO_DE_CORTINAS\": \"Persianas\", \"NUMERO_DE_PISO\": \"5\", \"NUMERO_DE_ASCENSORES\": \"2\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", 
\"CERCA_CENTROS_COMERCIALES\": \"Si\", \"ASCENSOR\": \"Si\", \"ZONA_DE_BBQ\": \"Si\", \"complementsAndFinishes\": [\"Comedor auxiliar\", \"Hall de Alcobas\", \"Cuarto de escoltas\", \"Vista panoramica\", \"Piscina\", \"Cancha(s) de Tennis\", \"Cancha(s) de squash\", \"Cancha(s) de Basket\", \"Cancha(s) de Futbol\", \"Gimnasio\", \"Sauna y/o turco\", \"Zonas verdes\", \"Salon comunal\", \"Zona para ninos\", \"Acceso para discapacitados\", \"Apto para ninos\", \"Se Permite Fumar\", \"Sobre via secundaria\", \"Alarma\", \"Puertas de seguridad\", \"Circuito cerrado de TV\", \"Citofonos\"]}', '-', '6156742/ Fonnegra Fonnegra y Cia Ltda - Principal/ 3153333651/ Fonnegra Fonnegra y Cia Ltda - Principal:', '573153333651' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '777-M2060731', 'Venta de Apartamento en El limonar - Bogotxc3xa1 D.C. - 777-M2060731', 'Apartamento de dos alcobas en un segundo piso con balcon de alcoba principal hasta la sala, cocina americana abierta en L, con zona de ropas independiente, pisos en madera laminada, alcoba principal con baxc3xb1o privado.Conjunto cuenta con piscina, zonas hxc3xbamedas, gym, salen de juegos, parque infantil, porterxc3xada, sala de espera etc.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-lisboa-2-habitaciones-2-banos-1-garajes/777-M2060731', 'Apartamento', '330000000', 'ALOJAR INMOBILIARIA S.A.', 'Bogotxc3xa1 D.C.', 'EL LIMONAR', '4.716963291168213', '-74.03605651855469', 'Calle 135C 11-33', '110121', '60', '60', '2020-08-08', '5', 'NULL', 'Entre 5 y 10 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"777-M2060731\", \"NOMBRE_COMUN_DEL_BARRIO\": \"EL LIMONAR\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"LISBOA\", \"ESTRATO\": \"5\", \"VALOR_DE_VENTA\": \"$330.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$285.000\", \"AREA_CONSTRUIDA\": \"60 m\", \"AREA_PRIVADA\": \"60 m\", \"HABITACIONES\": \"2\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Independiente\", \"PARQUEADERO_CUBIERTO\": \"Si\", \"DEPOSITOS\": \"1\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"TERRAZA/BALCON\": \"Balcon\", \"AREA_TERRAZA/BALCON\": \"5\", \"TIPO_DE_COCINA\": \"En L\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"VISTA\": \"Interior\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"NUMERO_DE_CLOSETS\": \"2\", \"TIPO_DE_ACABADO_PISO\": \"Laminado\", 
\"TIPO_DE_PISO_EN_ALCOBAS\": \"Laminado\", \"TIPO_DE_PISO_EN_COMEDOR\": \"Laminado\", \"TIPO_DE_PISO_EN_SALA\": \"Laminado\", \"EN_INTERIOR_Y/O_BLOQUE\": \"Si\", \"NUMERO_DE_PISO\": \"2\", \"NUMERO_DE_ASCENSORES\": \"1\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"ASCENSOR\": \"Si\", \"ZONA_DE_BBQ\": \"Si\", \"complementsAndFinishes\": [\"Piscina\", \"Gimnasio\", \"Sauna y/o turco\", \"Zonas verdes\", \"Salon comunal\", \"Zona para ninos\", \"Acceso para discapacitados\", \"Apto para ninos\", \"Sobre via secundaria\", \"Circuito cerrado de TV\", \"Citofonos\"]}', 'Carrera 7 # 82 - 66 Oficina 210', 'ALOJAR INMOBILIARIA S.A./ 2364752/ 3214910400/ Agente Inmobiliario:/ (1) 2364752/ inmobiliaria alojar s a/ Carrera 7 # 82 - 66 Oficina 210/ 2566683', '573214910400' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '815-M2500828', 'Venta de Casa en San simxc3xb3n guaymaral - Bogotxc3xa1 D.C. - 815-M2500828', 'Casa Hermosa de dos niveles, primer nivel sala de doble altura, estudio con salida al jardxc3xadn, cocina integral con despensa y cuarto de vajilla, comedor independiente de la sala con plancha de tepanyaki, al lado estxc3xa1 el comedor auxiliar, cava de vinos con temperatura controlada y amplia, segundo nivel, 5 habitaciones con baxc3xb1o y vestier, habitacixc3xb3n principal con dos vistieres, jacuzzi, solxc3xa1rium y turco, salida de terraza, segunda habitacixc3xb3n cuenta con salida a la terraza. La zona servicio es independiente a la casa, 1 piso tiene deposito, zona de lavanderxc3xada, un baxc3xb1o para el conductor, en el 2 piso se encuentra la habitacixc3xb3n para dos empleadas con baxc3xb1o y closets amplios, 4 garajes, zona de bicicletas, parqueadero de visitantes, en la terraza de la casa zona BBQ, un parasol para 8 personas, sistema de control de luces, sonido y cortinas. 
Paola Sandoval', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-casa-blanca-5-habitaciones-5-banos-4-garajes/815-M2500828', 'Casa', '3900000000', 'CARRIZOSA HERMANOS LTDA', 'Bogotxc3xa1 D.C.', 'SAN SIMxc3x93N GUAYMARAL', '4.8162946701049805', '-74.04507446289062', 'Calle 238 55-35', '111176', '650', '650', '2020-08-08', '6', '2', 'Entre 10 y 20 anos', '5', '5', '4', '{\"CODIGO_WEB\": \"815-M2500828\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SAN SIMON GUAYMARAL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CASA BLANCA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$3.900.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$900.000\", \"AREA_CONSTRUIDA\": \"650 m\", \"AREA_PRIVADA\": \"650 m\", \"AREA_DEL_LOTE\": \"1580 m\", \"HABITACIONES\": \"5 o mas\", \"BANOS\": \"5 o mas\", \"PARQUEADERO\": \"4 o mas\", \"PARQUEADERO_CUBIERTO\": \"Si\", \"DEPOSITOS\": \"2\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"TIPO_DE_CASA\": \"Condominio\", \"NUMERO_DE_NIVELES\": \"2\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Terraza\", \"AREA_TERRAZA/BALCON\": \"150\", \"TIPO_DE_COCINA\": \"Abierta\", \"TIPO_COMEDOR\": \"Comedor Independiente\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"VISTA\": \"Exterior\", \"CUARTO_DE_SERVICIO\": \"Si\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"CON_MUEBLES\": \"Si\", \"NUMERO_DE_CLOSETS\": \"2\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Madera\", \"TIPO_DE_PISO_EN_COMEDOR\": \"Madera\", \"TIPO_DE_PISO_EN_SALA\": \"Madera\", \"VIGILANCIA\": \"24hrs\", \"ZONA_DE_BBQ\": \"Si\", \"complementsAndFinishes\": [\"Comedor auxiliar\", \"Hall de Alcobas\", \"Jacuzzi\", \"Cuarto de escoltas\", \"Vista panoramica\", \"Con chimenea\", \"Apto para ninos\"]}', 'Cra 10 A # 69 A - 11', '5188777/ Alfonso Carrizosa Hermanos/ 3167228825/ Agente Inmobiliario:/ 3103048310/ Paola Sandoval/ carrera 10a #69a-11', 
'573167228825' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( 'MC2465121', 'Venta de Casa en San jose spring - Bogotxc3xa1 D.C. - MC2465121', 'Casa unifamiliar Barrio Spring, a una cuadra de la autopista norte y estacixc3xb3n Alcalxc3xa1 de TM, Primer piso, Ante jardxc3xadn cubierto para garaje o local, garaje, baxc3xb1o, sala-comedor, cocina integral con deposito. Segundo piso, 4 alcobas, baxc3xb1o y sala de estar. Tercer piso, 2 alcobas, estudio y cuarto de ropas. Cerca a centros comerciales, parques y colegios, zona de gran desarrollo, Colina Campestre y Cedritos. Excelente ubicacixc3xb3n.', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-san-jose-del-prado-5-habitaciones-2-banos-1-garajes/MC2465121', 'Casa', '680000000', 'David David', 'Bogotxc3xa1 D.C.', 'SAN JOSE SPRING', '4.7219477', '-74.05317', 'Carrera 46 135-50', '111111', '237', '237', '2020-08-08', 'NULL', 'NULL', 'Mas de 20 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"MC2465121\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SAN JOSE SPRING\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SAN JOSE DEL PRADO\", \"VALOR_DE_VENTA\": \"$680.000.000\", \"NUMERO_DE_PISO\": \"3\", \"TIEMPO_DE_CONSTRUIDO\": \"Mas de 20 anos\", \"AREA_CONSTRUIDA\": \"237 m\", \"AREA_PRIVADA\": \"237 m\", \"HABITACIONES\": \"5 o mas\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Independiente\", \"CARACTERISTICAS_DEL_PARQUEADERO\": \"Cubierto\", \"complementsAndFinishes\": [\"Alarma\", \"Cocina integral\", \"Estudio o biblioteca\", \"Piso en baldosa\", \"Zona de lavanderia\", \"Cerca Centros Comerciales\", \"Cerca Colegios / Universidades\", \"Cerca Parques\", \"Cerca Supermercados\", \"Cerca Transporte Publico\", \"Zona Residencial\"]}', 'NULL', '3202745257/ 3158788930/ David David/ David 
David:/ 9289421', '573158788930' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '5175-1914', 'Venta de Apartamento en Los rosales - Bogotxc3xa1 D.C. - 5175-1914', 'Lindo apartamento de 45 Mts2, una alcoba, un baxc3xb1o, sala con chimenea, comedor, cocina integral abierta, vista exterior e iluminacixc3xb3n natural, piso en madera natural, el edificio cuenta con seguridad privada, ascensor, lavanderxc3xada.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-bellavista-1-habitaciones-1-banos-1-garajes/5175-1914', 'Apartamento', '380000000', 'BRIKSS SAS', 'Bogotxc3xa1 D.C.', 'Los Rosales', '4.656804680798692', '-74.0518902569973', 'Calle 75 4-7', '110221', '45', '45', '2020-08-08', '6', 'NULL', 'Entre 10 y 20 anos', '1', '1', '1', '{\"CODIGO_WEB\": \"5175-1914\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Los Rosales\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"BELLAVISTA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$380.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$380.000\", \"AREA_CONSTRUIDA\": \"45 m\", \"AREA_PRIVADA\": \"45 m\", \"HABITACIONES\": \"1\", \"BANOS\": \"1\", \"PARQUEADERO\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"TERRAZA/BALCON\": \"Ninguno\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"VISTA\": \"Exterior\", \"TIPO_DE_ACABADO_PISO\": \"Otro\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Otro\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Otro\", \"NUMERO_DE_ASCENSORES\": \"1\", \"VIGILANCIA\": \"24hrs\", \"ASCENSOR\": \"Si\", \"complementsAndFinishes\": []}', 'CL 93B # 13 - 14 OF 307', 'CL 93B # 13 - 14 OF 307/ 4661864/ BRIKSS SAS - Principal:/ 3183757239/ BRIKSS SAS - Principal', '573183757239' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '633-211', 'Venta y arriendo de Casa en Nicolas de federman - Bogotxc3xa1 D.C. - 633-211', 'CASA LOTE DE TRECIENTOS DIEZ METROS CUADRADOS, EL LOTE TIENE UNA CASA PREFABRICADA DE DOS HABITACIONES, COCINA SEMIINTEGRAL, SALA COMEDOR, BAxc3x91O Y ZONA DE LAVANDERxc3x8dA. PARQUEADERO PARA UN CARRO. once X veintiocho.', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-campin-occidental-2-habitaciones-1-banos-1-garajes/633-211', 'Casa', '1600000000', 'GRUPO INMOBILIARIO CRECER SAS', 'Bogotxc3xa1 D.C.', 'Nicolas De Federman', '4.649185761241317', '-74.0804885829666', 'Calle 58A 35A-41', '111321', '310.00', '310.00', '2020-08-08', '4', 'NULL', 'Entre 5 y 10 anos', '1', '1', '1', '{\"CODIGO_WEB\": \"633-211\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Nicolas De Federman\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CAMPIN OCCIDENTAL\", \"ESTRATO\": \"4\", \"VALOR_DE_ARRIENDO\": \"$1.350.000\", \"VALOR_DE_VENTA\": \"$1.600.000.000\", \"AREA_CONSTRUIDA\": \"310.00 m\", \"AREA_PRIVADA\": \"310.00 m\", \"HABITACIONES\": \"2\", \"BANOS\": \"1\", \"PARQUEADERO\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"TIPO_DE_CASA\": \"Tradicional\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"VISTA\": \"Exterior\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Baldosa\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Baldosa\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"complementsAndFinishes\": [\"Sobre via secundaria\"]}', 'Cll 63 No 35 a 37', '3108696065/ grupo inmobiliario crecer - Principal:/ 3108576764/ Cll 63 No 35 a 37/ 3153200/ grupo inmobiliario crecer - 
Principal', 'NULL' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '887-M2614852', 'Venta de Apartamento en El cedrito - Bogotxc3xa1 D.C. - 887-M2614852', 'Bogota sector el Cedrito, Apartamento muy cxc3xb3modo, buenos acabados, iluminado y muy bien ubicado. ', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-los-cedritos-3-habitaciones-2-banos-1-garajes/887-M2614852', 'Apartamento', '400000000', 'GUILLERMO DUQUE INMOBILIARIA', 'Bogotxc3xa1 D.C.', 'El CEDRITO', '4.730204105377197', '-74.04576110839844', 'Avenida Calle 147 19-30', '110131', '100', '100', '2020-08-08', '4', 'NULL', 'Entre 5 y 10 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"887-M2614852\", \"NOMBRE_COMUN_DEL_BARRIO\": \"El CEDRITO\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"LOS CEDRITOS\", \"ESTRATO\": \"4\", \"VALOR_DE_VENTA\": \"$400.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$250.000\", \"AREA_CONSTRUIDA\": \"100 m\", \"AREA_PRIVADA\": \"100 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"PARQUEADERO_CUBIERTO\": \"Si\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TIPO_DE_COCINA\": \"Paralela\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"VISTA\": \"Exterior\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"NUMERO_DE_CLOSETS\": \"3\", \"EN_INTERIOR_Y/O_BLOQUE\": \"Si\", \"NUMERO_DE_PISO\": \"1\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"complementsAndFinishes\": [\"Sobre via 
secundaria\"]}', 'Carrera 81 #4G - 65', '3008365673/ GUILLERMO DUQUE INMOBILIARIA:/ GUILLERMO DUQUE INMOBILIARIA/ (4) 2040744', '573008365673' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '847-4004', 'Venta y arriendo de Apartamento en Los rosales - Bogotxc3xa1 D.C. - 847-4004', 'En el exclusivo sector de Los Rosales, donde contarxc3xa1s con gran tranquilidad y acceso a excelentes vxc3xadas de movilizacixc3xb3n como la carrera sxc3xa9ptima, estamos ofreciendo para arriendo o venta este espectacular apartamento. Podrxc3xa1s ingresar a la propiedad a travxc3xa9s de un ascensor privado, y al hacerlo verxc3xa1s que hay aspectos que resaltan dentro del lugar, como su disexc3xb1o, sus espacios y lo luminosos que son. El xc3xa1rea social posee amplios ventanales de piso a techo que permiten una entrada total de luz nat', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-los-rosales-3-habitaciones-4-banos-3-garajes/847-4004', 'Apartamento', '3900000000', 'PADS', 'Bogotxc3xa1 D.C.', 'Los Rosales', '4.660006796513658', '-74.05030510233371', 'Carrera 5 78-95', '110221', '275', '0', '2020-08-08', '6', 'NULL', 'Entre 0 y 5 anos', '4', '4', '3', '{\"CODIGO_WEB\": \"847-4004\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Los Rosales\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"LOS ROSALES\", \"ESTRATO\": \"6\", \"VALOR_DE_ARRIENDO\": \"$17.000.000\", \"VALOR_DE_VENTA\": \"$3.900.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$2.000.000\", \"AREA_CONSTRUIDA\": \"275 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"4\", \"PARQUEADERO\": \"3\", \"TIPO_DE_PARQUEADERO\": \"Independiente\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Balcon\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"VISTA\": \"Exterior\", \"CUARTO_DE_SERVICIO\": \"Si\", \"ZONA_DE_LAVANDERIA\": 
\"Si\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Madera\", \"EN_INTERIOR_Y/O_BLOQUE\": \"Si\", \"NUMERO_DE_PISO\": \"6\", \"NUMERO_DE_ASCENSORES\": \"1\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"ASCENSOR\": \"Si\", \"complementsAndFinishes\": [\"Hall de Alcobas\", \"Cuarto de escoltas\", \"Con chimenea\", \"Piscina\", \"Gimnasio\", \"Salon comunal\", \"Zona para ninos\", \"Sobre via secundaria\", \"Citofonos\"]}', 'Calle 93B #11A-84 piso 3', 'PADS:/ Calle 93B #11A-84 piso 3 CC Portobelo/ 2566701/ 3142007942/ PADS', '573142007942' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '681-M2194774', 'Venta de Apartamento en Santa ana oriental - Bogotxc3xa1 D.C. - 681-M2194774', 'Apartamento ubicado en el exclusivo sector de Santa Ana Oriental, el edifico esta rodeado de espectaculares casas, sector muy seguro . El apartamento cuenta con 3 alcobas, alcoba baxc3xb1o de servicio, estudio y terraza.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-santa-ana-oriental-3-habitaciones-3-banos-2-garajes/681-M2194774', 'Apartamento', '850000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'SANTA ANA ORIENTAL', '4.688530921936035', '-74.03411865234375', 'Calle 110 6-50', '110111', '121', '117', '2020-08-08', '6', 'NULL', 'Mas de 20 anos', '3', '3', '2', '{\"CODIGO_WEB\": \"681-M2194774\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SANTA ANA ORIENTAL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SANTA ANA ORIENTAL\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$850.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$600.000\", \"AREA_CONSTRUIDA\": \"121 m\", \"AREA_PRIVADA\": \"117 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"3\", \"PARQUEADERO\": \"2\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"PARQUEADERO_CUBIERTO\": \"Si\", \"DEPOSITOS\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Mas de 20 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Terraza\", \"AREA_TERRAZA/BALCON\": \"30\", \"TIPO_DE_COCINA\": \"Lineal\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"VISTA\": \"Exterior\", \"CUARTO_DE_SERVICIO\": \"Si\", \"NUMERO_DE_CLOSETS\": \"3\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"TIPO_DE_PISO_EN_COMEDOR\": \"Madera\", 
\"TIPO_DE_PISO_EN_SALA\": \"Madera\", \"NUMERO_DE_PISO\": \"2\", \"NUMERO_DE_ASCENSORES\": \"1\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"ASCENSOR\": \"Si\", \"complementsAndFinishes\": [\"Con chimenea\", \"Citofonos\"]}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ (1) 6204300/ olga lucia rueda salazar/ 3124332588/ AV 15 # 124-65/ Agente Inmobiliario:/ 3112514804/ PUNTO INMOBILIARIO - B.M. S.A.S', '573112514804' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '11233-2377790', 'Venta de Bodega en San rafael - Bogotxc3xa1 D.C. - 11233-2377790', 'Bodega en lote esquinero de casi 147 mts2 12 de frente x 15 de fondo situada en la Carrera 53C con Calle 5B. Las vxc3xadas de acceso a la bodega son amplias y rxc3xa1pidas, muy cerca de Puente Aranda. Tiene una entrada para vehxc3xadculos medianos sobre la carrera. Estxc3xa1 situada en San Rafael, un barrio residencial e industrial por lo que puede ser destinada a bodegaje y o a actividades industriales limpias y de bajo ruido. Piso en concreto y sercha metalica en el techo. Incluye una pequexc3xb1a', 'https://www.metrocuadrado.com/inmueble/venta-bodega-bogota-san-rafael/11233-2377790', 'Bodega', '480000000', 'LOYAL CITY BIENES RAICES', 'Bogotxc3xa1 D.C.', 'SAN RAFAEL', '4.622081608857017', '-74.1125116798233', 'Carrera 53BBIS 5A-52', '111611', '147', '0', '2020-08-08', '3', 'NULL', 'Mas de 20 anos', '', '', '', '{\"CODIGO_WEB\": \"11233-2377790\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SAN RAFAEL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SAN RAFAEL\", \"ESTRATO\": \"3\", \"VALOR_DE_VENTA\": \"$480.000.000\", \"AREA_CONSTRUIDA\": \"147 m\", \"TIEMPO_DE_CONSTRUIDO\": \"Mas de 20 anos\", \"TIPO_BODEGA\": \"Otro\", \"TIPO_DE_ACABADO_PISO\": \"Otro\", \"complementsAndFinishes\": []}', 'CARRERA 67 # 46 -14', '3002660813/ LOYAL CITY BIENES RAICES:/ LOYAL CITY BIENES RAICES', '573118448126' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '2266-1972413', 'Venta de Apartamento en Santa barbara occidental - Bogotxc3xa1 D.C. - 2266-1972413', 'Vendo apartamento en piso 4, exterior en muy buen estado. El apto tiene 3 alcobas, 2 baxc3xb1os, sala comedor con chimenea. Cocina cerrada, cuarto y baxc3xb1o de servicio. Zona de lavanderxc3xada. Un Parqueadero cubierto. Los pisos del apto estxc3xa1n en madera laminada. Cocina y baxc3xb1os en buen estado de conservacixc3xb3n. Edificio con vigilancia 24 horas. En excelente estado, ladrillo a la vista. Linda recepcixc3xb3n, excelente ubicacion porque es residencial con bastante comercio por lo tanto se encuentra t', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-santa-barbara-occidental-3-habitaciones-3-banos-1-garajes/2266-1972413', 'Apartamento', '425000000', 'Boutique de Bienes Raices', 'Bogotxc3xa1 D.C.', 'SANTA BARBARA OCCIDENTAL', '4.706132731731016', '-74.05043758211897', 'Avenida Calle 127 20-4', '110121', '93', '93', '2020-08-08', '5', 'NULL', 'Mas de 20 anos', '3', '3', '1', '{\"CODIGO_WEB\": \"2266-1972413\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SANTA BARBARA OCCIDENTAL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SANTA BARBARA OCCIDENTAL\", \"ESTRATO\": \"5\", \"VALOR_DE_VENTA\": \"$425.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$385.000\", \"AREA_CONSTRUIDA\": \"93 m\", \"AREA_PRIVADA\": \"93 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"3\", \"PARQUEADERO\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Mas de 20 anos\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_DE_ACABADO_PISO\": \"Otro\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Otro\", \"NUMERO_DE_PISO\": \"4\", \"NUMERO_DE_ASCENSORES\": \"1\", \"ASCENSOR\": \"Si\", \"complementsAndFinishes\": [\"Comedor auxiliar\", \"Hall 
de Alcobas\", \"Con chimenea\", \"Citofonos\"]}', 'na', 'XIMENA FERNANDEZ/ 3138528340/ CL 0 # 0 - 0/ XIMENA FERNANDEZ:', '573138528340' )",
"INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '681-458', 'Venta de Local Comercial en Puente aranda - Bogotxc3xa1 D.C. - 681-458', 'LOCAL UBICADO EN EL CENTRO COMERCIA OUTLET FACTORY, CENTRADO EN EL PRIMER PISO.LUGAR CONCURRIDO MUY BIEN UBICADO.', 'https://www.metrocuadrado.com/inmueble/venta-local-comercial-bogota-salazar-gomez/681-458', 'Local Comercial', '3460000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'Puente Aranda', '4.628376343649677', '-74.11732013354063', 'Avenida Calle 9-0', '111611', '213.00', '213.00', '2020-08-08', '7', 'NULL', 'Entre 5 y 10 anos', '', '', '', '{\"CODIGO_WEB\": \"681-458\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Puente Aranda\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SALAZAR GOMEZ\", \"ESTRATO\": \"7\", \"VALOR_DE_VENTA\": \"$3.460.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$3.900.000\", \"AREA_CONSTRUIDA\": \"213.00 m\", \"AREA_PRIVADA\": \"213.00 m\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"TIPO_LOCAL\": \"Otro\", \"complementsAndFinishes\": []}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' )"
]
# Run every pre-built INSERT statement from `myli` against the open DB cursor.
# NOTE(review): these statements embed their values directly in the SQL text;
# fine for this hard-coded seed data, but parameterized queries would be
# required if the values ever came from an external source.
for insert_sql in myli:
    mycursor.execute(insert_sql)
#mybulk = "INSERT INTO states ( id_web, title, description, url, type, price, agency_name, city, sector, latitude, longitude, address, postal_code, built_area, private_area, last_date, strata, floors, antiquity, bedrooms, bathrooms, garages, features, agency_address, agency_contact, agency_phone ) VALUES ( '5434-C0003-08', 'Proyecto LUMINIUM, Apartamentos nuevos en Rincon del chico, Bogotxc3xa1 D.C. - Id 003', 'Exclusivo proyecto con excelente ubicacixc3xb3n, 70 apartamentos en 8 pisos con las mejores especificaciones (ventanerxc3xada acxc3xbastica en habitaciones). Lobby tipo hotel, Salxc3xb3n social, Lockers para las empleadas del servicio domxc3xa9stico con baxc3xb1o, Business center, Oficina de administracixc3xb3n y Terraza comunal con Gimnasio, pista de trote, parque infantil, spot de mascotas y bbq. Parqueadero de visitantes, bicicletero, planta elxc3xa9ctrica de suplencia total y sistema de captacixc3xb3n de aguas pluviales. 22 meses para pago de cuota inicial', '392564000', 'https://www.metrocuadrado.com/proyecto/luminium/5434-C0003', 'Apartamento', 'Bogotxc3xa1 D.C.', 'CHICO SAN PATRICIO', '4.68919', '-74.04276', 'Calle 106 13-28', '110111', '52.73', '47.58', '2020-08-08', '5', 'NULL', 'En Construccion', '2', '2', '1', '{\"CODIGO_WEB:\": \"5434-C0003\", \"NOMBRE_COMUN_DEL_BARRIO\": \"CHICO SAN PATRICIO\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"RINCON DEL CHICO\", \"ESTRATO\": \"5\", \"VALOR_DESDE/HASTA\": \"Desde $347.108.000 hasta $625.004.000\", \"AREA_CONSTRUIDA_DESDE/HASTA\": \"Desde 38.99 hasta 79.63\", \"AREA_PRIVADA_DESDE/HASTA\": \"Desde 35.13 hasta 86.07\", \"TIEMPO_DE_CONSTRUIDO\": \"En Construccion\", \"ETAPA_DE_CONSTRUCCION/TIEMPO_DE_CONSTRUIDO\": \"En construccion\", \"MONEDA\": \"Peso colombiano\", \"VIGILANCIA\": \"24hrs\", \"FECHA_ESTIMADA_DE_ENTREGA\": \"04-2022\", \"PARQUEADERO_DE_VISITANTES\": \"Si\", \"complementsAndFinishes\": []}', 'Consucasa SAS', 'Calle 119 # 72 a - 26', '3212686024/ (1) 7430066 Ext: 531/ 3213137285', '573213137285' ),( 
'681-446', 'Venta de Apartamento en Santa monica - Bogotxc3xa1 D.C. - 681-446', 'Apartamento ubicado en exclusivo y central sector, habitaciones amplias para la familia, inmueble en buen estado, buenas vxc3xadas de acceso', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-sabanalarga-3-habitaciones-3-banos-2-garajes/681-446', 'Apartamento', '930000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'Santa Monica', '4.707255893469122', '-74.04345375514912', 'Calle 127ABISA 15-58', '110121', '180.00', '180.00', '2020-08-08', '6', 'NULL', 'Entre 10 y 20 anos', '3', '3', '2', '{\"CODIGO_WEB\": \"681-446\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Santa Monica\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SABANALARGA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$930.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$690.600\", \"AREA_CONSTRUIDA\": \"180.00 m\", \"AREA_PRIVADA\": \"180.00 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"3\", \"PARQUEADERO\": \"2\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"CUARTO_DE_SERVICIO\": \"Si\", \"TIPO_DE_CORTINAS\": \"Persianas\", \"complementsAndFinishes\": [\"Con chimenea\", \"Gimnasio\", \"Salon comunal\", \"Zona para ninos\"]}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' ),( '681-432', 'Venta y arriendo de Bodega en Prado veraniego - Bogotxc3xa1 D.C. 
- 681-432', 'Arriendo o vendo bodega situada en el barrio prado veraniego, de tres pisos, tres baxc3xb1os , puntos de luz trifxc3xa1sica y monofxc3xa1sica doscientos veinte y ciento diez v ,carga de energxc3xada treintakw con trifxc3xa1sica , la placa de la bodega soporta un peso de dos.cero klg por metro cuadrado en el primer piso, en los pisos dos y tres el peso a soportar es de doscientoskg por metro cuadrado.', 'https://www.metrocuadrado.com/inmueble/venta-bodega-bogota-prado-veraniego-norte/681-432', 'Bodega', '2000000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'PRADO VERANIEGO', '4.718366979453166', '-74.06691208047147', 'Carrera 57A 129B-1', '111111', '390.00', '390.00', '2020-08-08', '3', 'NULL', 'Entre 0 y 5 anos', '', '', '', '{\"CODIGO_WEB\": \"681-432\", \"NOMBRE_COMUN_DEL_BARRIO\": \"PRADO VERANIEGO\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"PRADO VERANIEGO NORTE\", \"ESTRATO\": \"3\", \"VALOR_DE_ARRIENDO\": \"$14.000.000\", \"VALOR_DE_VENTA\": \"$2.000.000.000\", \"AREA_CONSTRUIDA\": \"390.00 m\", \"AREA_PRIVADA\": \"390.00 m\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"TIPO_BODEGA\": \"Otro\", \"complementsAndFinishes\": []}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' ),( '681-429', 'Venta de Apartamento en Chico alto - Bogotxc3xa1 D.C. - 681-429', 'Hermoso apartamento, ubicado en exclusivo sector de la ciudad. Duplex, espectacular zona social, tres alcobas, dos de ellas con walking closet y baxc3xb1o. Todas las xc3xa1reas del apartamento tienen acceso a las terrazas.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-bosque-de-bellavista-3-habitaciones-5-banos-2-garajes/681-429', 'Apartamento', '3200000000', 'PUNTO INMOBILIARIO - B.M. 
S.A.S', 'Bogotxc3xa1 D.C.', 'Chico Alto', '4.674491536261334', '-74.02694634878162', 'Carrera 1G Este 101B-0', '110111', '374.00', '305.00', '2020-08-08', '6', 'NULL', 'Entre 0 y 5 anos', '5', '5', '2', '{\"CODIGO_WEB\": \"681-429\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Chico Alto\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"BOSQUE DE BELLAVISTA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$3.200.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$3.128.000\", \"AREA_CONSTRUIDA\": \"374.00 m\", \"AREA_PRIVADA\": \"305.00 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"5 o mas\", \"PARQUEADERO\": \"2\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TIPO_COMEDOR\": \"Comedor Independiente\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"VIGILANCIA\": \"24hrs\", \"complementsAndFinishes\": []}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' ),( '681-M2261735', 'Venta de Casa en Gratamira - Bogotxc3xa1 D.C. - 681-M2261735', 'Hermosa casa, en conjunto cerrado con cancha de fxc3xbatbol, cancha de tenis, parque, senderos peatonales, sector tranquilo y exclusivo, total de casas 23 . La casa cuenta con 4 habitaciones, estudio, alcoba y baxc3xb1o de servicio, zona de bbq , todos los pisos son en madera natural. Vxc3xadas de acceso por la avenida Boyacxc3xa1, la Suba, Calle 138, 134. Muy cerca a importantes centros comerciales, Parque de la Colina, Porto Alegre y Bulevar Niza.', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-iberia-4-habitaciones-4-banos-3-garajes/681-M2261735', 'Casa', '950000000', 'PUNTO INMOBILIARIO - B.M. 
S.A.S', 'Bogotxc3xa1 D.C.', 'GRATAMIRA', '4.727828502655029', '-74.07007598876953', 'Avenida Boyacxc3xa1 134-0', '111121', '220', '201', '2020-08-08', '5', '2', 'Entre 10 y 20 anos', '4', '4', '3', '{\"CODIGO_WEB\": \"681-M2261735\", \"NOMBRE_COMUN_DEL_BARRIO\": \"GRATAMIRA\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"IBERIA\", \"ESTRATO\": \"5\", \"VALOR_DE_VENTA\": \"$950.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$792.000\", \"AREA_CONSTRUIDA\": \"220 m\", \"AREA_PRIVADA\": \"201 m\", \"HABITACIONES\": \"4\", \"BANOS\": \"4\", \"PARQUEADERO\": \"3\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"DEPOSITOS\": \"1\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"TIPO_DE_CASA\": \"Condominio\", \"NUMERO_DE_NIVELES\": \"2\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Balcon\", \"AREA_TERRAZA/BALCON\": \"6\", \"TIPO_DE_COCINA\": \"Lineal\", \"TIPO_COMEDOR\": \"Comedor Independiente\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"CUARTO_DE_SERVICIO\": \"Si\", \"VISTA\": \"Interior\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Madera\", \"TIPO_DE_PISO_EN_COMEDOR\": \"Madera\", \"TIPO_DE_PISO_EN_SALA\": \"Madera\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"complementsAndFinishes\": [\"Comedor auxiliar\", \"Vista panoramica\", \"Cancha(s) de Tennis\", \"Cancha(s) de Futbol\", \"Zonas verdes\", \"Sendero peatonal\", \"Jardin\", \"Apto para ninos\", \"Circuito cerrado de TV\", \"Citofonos\"]}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ (1) 6204300/ olga lucia rueda salazar/ 3124332588/ AV 15 # 124-65/ Agente Inmobiliario:/ 3112514804/ PUNTO INMOBILIARIO - B.M. 
S.A.S', '573112514804' ),( 'MC2632790', 'Venta de Apartamento en Cedritos - Bogotxc3xa1 D.C. - MC2632790', 'REMODELADO, Hermoso apartamento, ubicado en Cedritos, zona de altxc3xadsima valorizacixc3xb3n y rodeado de importantes avdas como: Novena, Cra Quince, Calle Ciento cuarenta. Encontramos grandes almacenes como Exito, Olxc3xadmpica, Carulla, Home Center y Centros Ciales como Palatino y Cedritos. El apartamento estxc3xa1 ubicado en cuarto piso con ascensor, parqueadero. Su distribucixc3xb3n le permite tener excelente luz natural, habitaciones muy amplias. Cocina Integral cerrada muy amplia e iluminada. Pisos en PVC.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-cedro-narvaez-3-habitaciones-2-banos-1-garajes/MC2632790', 'Apartamento', '340000000', 'Elvira Saby Thais', 'Bogotxc3xa1 D.C.', 'Cedritos', '4.7258325', '-74.03971', 'Calle 145 13A-58', '110121', '78.27', '78.27', '2020-08-08', 'NULL', 'NULL', 'Entre 0 y 5 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"MC2632790\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Cedritos\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CEDRO NARVAEZ\", \"VALOR_DE_VENTA\": \"$340.000.000\", \"NUMERO_DE_PISO\": \"4\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"AREA_CONSTRUIDA\": \"78.27 m\", \"AREA_PRIVADA\": \"78.27 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"CARACTERISTICAS_DEL_PARQUEADERO\": \"Descubierto\", \"complementsAndFinishes\": [\"Cocina integral\", \"Citofonos\", \"Con chimenea\", \"Vigilancia\", \"Ascensor\", \"Conjunto cerrado\", \"Parqueadero visitantes\", \"Porteria\", \"Zona para ninos\", \"Zonas verdes\", \"Cerca Centros Comerciales\", \"Cerca Colegios / Universidades\", \"Cerca Parques\", \"Cerca Supermercados\", \"Cerca Transporte Publico\", \"Zona Residencial\"]}', 'NULL', 'Elvira Saby Thais:/ 8161146/ Elvira Saby Thais/ 3138161146', '573138161146' ),( 'MC1813654', 'Venta de Apartamento en Gran granada - Bogotxc3xa1 D.C. 
- MC1813654', 'Espectacular Vista... Zona de alta valorizacixc3xb3n, con salida directa a la Calle 80, cerca a comercio, a industria y a colegios Cota Siberia Tenjo ', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-centro-engativa-3-habitaciones-2-banos-1-garajes/MC1813654', 'Apartamento', '240000000', 'Diana Marcela Castro', 'Bogotxc3xa1 D.C.', 'gran granada', '4.719544', '-74.13024749', 'Calle 74A 116C-60', '111031', '60', '60', '2020-08-08', 'NULL', 'NULL', 'Entre 0 y 5 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"MC1813654\", \"NOMBRE_COMUN_DEL_BARRIO\": \"gran granada\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CENTRO ENGATIVA\", \"VALOR_DE_VENTA\": \"$240.000.000\", \"NUMERO_DE_PISO\": \"9\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"AREA_CONSTRUIDA\": \"60 m\", \"AREA_PRIVADA\": \"60 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"CARACTERISTICAS_DEL_PARQUEADERO\": \"Cubierto\", \"complementsAndFinishes\": [\"Cocina integral\", \"Bano Auxiliar\", \"Citofonos\", \"Cocina Tipo Americano\", \"Vigilancia\", \"Equipado / Amoblado\", \"Piso en madera\", \"Zona de lavanderia\", \"Ascensor\", \"Conjunto cerrado\", \"Parqueadero visitantes\", \"Porteria\", \"Salon comunal\", \"Zona para ninos\", \"Zonas verdes\", \"Cerca Centros Comerciales\", \"Cerca Colegios / Universidades\", \"Cerca Parques\", \"Cerca Supermercados\", \"Cerca Transporte Publico\", \"Ubicado en Edificio\", \"Zona Residencial\"]}', 'NULL', 'Diana Marcela Castro/ Diana Marcela Castro:/ 3203399370/ 3015965448', '573203399370' ),( 'MC2415768', 'Venta de Apartamento en Mazuren - Bogotxc3xa1 D.C. 
- MC2415768', 'Apto en venta directa, Primer Piso, remodelado, 3 habitaciones, 1 baxc3xb1o, Cocina integral, patio de Ropas, Sala Comedor que Incluye mueble funcional para el living, en conjunto residencial cerrado que cuenta con Salxc3xb3n Comunal, gym, 2 parques infantiles, estrato 4, a 5 minutos del centro comercial parque la colina, xc3xa9xito de la Colina, Clxc3xadnica la Colina, Parqueadero Comunal', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-gilmar-3-habitaciones-1-banos-1-garajes/MC2415768', 'Apartamento', '210000000', 'Yulieth Yulieth', 'Bogotxc3xa1 D.C.', 'mazuren', '4.743928', '-74.06217', 'Carrera 59 160-46', '111156', '49', '46.30', '2020-08-08', 'NULL', 'NULL', 'Entre 5 y 10 anos', '1', '1', '1', '{\"CODIGO_WEB\": \"MC2415768\", \"NOMBRE_COMUN_DEL_BARRIO\": \"mazuren\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"GILMAR\", \"VALOR_DE_VENTA\": \"$210.000.000\", \"NUMERO_DE_PISO\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"AREA_CONSTRUIDA\": \"49 m\", \"AREA_PRIVADA\": \"46.30 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"1\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Comunal\", \"CARACTERISTICAS_DEL_PARQUEADERO\": \"Cubierto\", \"complementsAndFinishes\": [\"Cocina integral\", \"Vigilancia\", \"Piso en madera\", \"Zona de lavanderia\", \"Conjunto cerrado\", \"Parqueadero visitantes\", \"Porteria\", \"Salon comunal\", \"Zona para ninos\", \"Cerca Centros Comerciales\", \"Cerca Colegios / Universidades\", \"Cerca Parques\", \"Cerca Supermercados\", \"Cerca Transporte Publico\", \"Zona Comercial\", \"Zona Residencial\"]}', 'NULL', 'Yulieth Yulieth/ 3103210391/ Yulieth Yulieth:', 'NULL' ),( '681-457', 'Venta de Apartamento en Colina campestre - Bogotxc3xa1 D.C. 
- 681-457', 'Apartamento en excelente estado en segundo piso,ubicado en tranquilo y central sector cuenta con tres habitaciones cada uno con sus baxc3xb1os, dos parqueaderos, cuenta con salones de estudio.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-santa-helena-i-3-habitaciones-3-banos-2-garajes/681-457', 'Apartamento', '600000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'COLINA CAMPESTRE', '4.733185967592347', '-74.0627852136668', 'Avenida Carrera 58 146A-0', '111156', '111.00', '111.00', '2020-08-08', '5', 'NULL', 'Entre 10 y 20 anos', '3', '3', '2', '{\"CODIGO_WEB\": \"681-457\", \"NOMBRE_COMUN_DEL_BARRIO\": \"COLINA CAMPESTRE\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SANTA HELENA I\", \"ESTRATO\": \"5\", \"VALOR_DE_VENTA\": \"$600.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$350.000\", \"AREA_CONSTRUIDA\": \"111.00 m\", \"AREA_PRIVADA\": \"111.00 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"3\", \"PARQUEADERO\": \"2\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"complementsAndFinishes\": []}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' ),( '1821-C0006-07', 'Proyecto Vixc3xba Park 118, Apartamentos nuevos en Santa barbara occidental, Bogotxc3xa1 D.C. - Id 006', 'El proyecto inmobiliario de vivienda Vixc3xba Park 118 se encontrarxc3xa1 localizado en la Calle 116 # 18 B 08, en el sector de Santa Bxc3xa1rbara Occidental, una de las zonas de mayor desarrollo inmobiliario del norte de Bogotxc3xa1, Se desarrolla en una torre residencial de ocho (08) pisos sobre la Calle 116 y de seis (6) pisos sobre la Calle 118. 
El Proyecto contarxc3xa1 con un moderno lobby tipo hotel, un (1) salxc3xb3n comunal, un (1) gimnasio, un (1) salxc3xb3n de juegos, una (1) oficina para la administracixc3xb3n, perrera con capacidad para dos (2) perros, planta elxc3xa9ctrica de suplencia parcial, dos (2) ascensores de xc3xbaltima tecnologxc3xada, dos (2) escaleras de evacuacixc3xb3n cerradas y cubierta comunal con BBQ, con xc3xa1reas de recreacixc3xb3n activa y pasiva.', '567285461', 'https://www.metrocuadrado.com/proyecto/viu-park-118/1821-C0006', 'Apartamento', 'Bogotxc3xa1 D.C.', 'SANTA BARBARA OCCIDENTAL', '4.699173', '-74.047585', 'Carrera 18 118-37', '110111', '64.55', '53.67', '2020-08-08', '5', 'NULL', 'Sobre Plano', '2', '2', '1', '{\"CODIGO_WEB:\": \"1821-C0006\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SANTA BARBARA OCCIDENTAL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SANTA BARBARA OCCIDENTAL\", \"ESTRATO\": \"5\", \"VALOR_DESDE/HASTA\": \"Desde $435.862.256 hasta $617.143.808\", \"AREA_CONSTRUIDA_DESDE/HASTA\": \"Desde 55.33 hasta 69.8\", \"AREA_PRIVADA_DESDE/HASTA\": \"Desde 48.65 hasta 63.94\", \"TIEMPO_DE_CONSTRUIDO\": \"Sobre Plano\", \"ETAPA_DE_CONSTRUCCION/TIEMPO_DE_CONSTRUIDO\": \"En construccion\", \"MONEDA\": \"Peso colombiano\", \"VIGILANCIA\": \"24hrs\", \"FECHA_ESTIMADA_DE_ENTREGA\": \"04-2022\", \"PARQUEADERO_DE_VISITANTES\": \"Si\", \"complementsAndFinishes\": []}', 'GRADECO CONSTRUCCIONES Y CIA S.A.', 'AVENIDA CARRERA 19 # 120 - 71', '(1) 7424250 Ext: 116/ 3023336520', '573023336520' ),( '5175-326', 'Venta y arriendo de Casa en Altos de provenza - Bogotxc3xa1 D.C. - 5175-326', 'Espectacular casa, dos pisos, amplios espacios, iluminada. El segundo piso tiene un xc3xa1rea social amplia, una segunda cocina, balcones, chimenea, un estudio y un cuarto con baxc3xb1o. 
Mayor informacixc3xb3n www.brikss.com.', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-niza-suba-4-habitaciones-5-banos-4-garajes/5175-326', 'Casa', '3500000000', 'BRIKSS SAS', 'Bogotxc3xa1 D.C.', 'ALTOS DE PROVENZA', '4.72816722', '-74.07561954', 'Carrera 76A 135-35', '111121', '740', '0', '2020-08-08', '6', 'NULL', 'Entre 10 y 20 anos', '5', '5', '4', '{\"CODIGO_WEB\": \"5175-326\", \"NOMBRE_COMUN_DEL_BARRIO\": \"ALTOS DE PROVENZA\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"NIZA SUBA\", \"ESTRATO\": \"6\", \"VALOR_DE_ARRIENDO\": \"$12.000.000\", \"VALOR_DE_VENTA\": \"$3.500.000.000\", \"AREA_CONSTRUIDA\": \"740 m\", \"AREA_DEL_LOTE\": \"740 m\", \"HABITACIONES\": \"4\", \"BANOS\": \"5 o mas\", \"PARQUEADERO\": \"4 o mas\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"TIPO_DE_CASA\": \"Tradicional\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Terraza\", \"CUARTO_DE_SERVICIO\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Otro\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Otro\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Otro\", \"VIGILANCIA\": \"24hrs\", \"complementsAndFinishes\": [\"Hall de Alcobas\", \"Puertas de seguridad\", \"Citofonos\"]}', 'CL 93B # 13 - 14 OF 307', 'CL 93B # 13 - 14 OF 307/ 4661864/ BRIKSS SAS - Principal:/ 3183757239/ BRIKSS SAS - Principal', '573183757239' ),( '2120-904489', 'Venta de Apartamento en Cerros de los alpes - Bogotxc3xa1 D.C. - 2120-904489', 'Espectacular apto en cerros de los alpes de 326 mas 26 de terraza, piso , ascensor directo, vista a todo Bogota, esquinero, exterior, cuartos cada uno con bano, estudio, estar de tv, parqueaderos independientes. 
Club house y zonas verdes', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-cerros-de-santa-barbara-4-habitaciones-5-banos-3-garajes/2120-904489', 'Apartamento', '2600000000', 'Fonnegra Fonnegra y Cia Ltda', 'Bogotxc3xa1 D.C.', 'CERROS DE LOS ALPES', '4.696831226348877', '-74.0276107788086', 'Carrera 3B 121-49', '110111', '326', '326', '2020-08-08', '6', 'NULL', 'Entre 5 y 10 anos', '5', '5', '3', '{\"CODIGO_WEB\": \"2120-904489\", \"NOMBRE_COMUN_DEL_BARRIO\": \"CERROS DE LOS ALPES\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CERROS DE SANTA BARBARA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$2.600.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$1.000.000\", \"AREA_CONSTRUIDA\": \"326 m\", \"AREA_PRIVADA\": \"326 m\", \"HABITACIONES\": \"4\", \"BANOS\": \"5 o mas\", \"PARQUEADERO\": \"3\", \"TIPO_DE_PARQUEADERO\": \"Independiente\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Balcon\", \"AREA_TERRAZA/BALCON\": \"26\", \"CUARTO_DE_SERVICIO\": \"Si\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"NUMERO_DE_CLOSETS\": \"4\", \"TIPO_DE_CORTINAS\": \"Persianas\", \"NUMERO_DE_PISO\": \"5\", \"NUMERO_DE_ASCENSORES\": \"2\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"ASCENSOR\": \"Si\", \"ZONA_DE_BBQ\": \"Si\", \"complementsAndFinishes\": [\"Comedor auxiliar\", \"Hall de Alcobas\", \"Cuarto de escoltas\", \"Vista panoramica\", \"Piscina\", \"Cancha(s) de Tennis\", \"Cancha(s) de squash\", \"Cancha(s) de Basket\", \"Cancha(s) de Futbol\", \"Gimnasio\", \"Sauna y/o turco\", \"Zonas verdes\", \"Salon comunal\", \"Zona para ninos\", \"Acceso para discapacitados\", \"Apto para ninos\", \"Se Permite Fumar\", \"Sobre via secundaria\", \"Alarma\", \"Puertas de seguridad\", \"Circuito cerrado de TV\", \"Citofonos\"]}', '-', '6156742/ 
Fonnegra Fonnegra y Cia Ltda - Principal/ 3153333651/ Fonnegra Fonnegra y Cia Ltda - Principal:', '573153333651' ),( '777-M2060731', 'Venta de Apartamento en El limonar - Bogotxc3xa1 D.C. - 777-M2060731', 'Apartamento de dos alcobas en un segundo piso con balcon de alcoba principal hasta la sala, cocina americana abierta en L, con zona de ropas independiente, pisos en madera laminada, alcoba principal con baxc3xb1o privado.Conjunto cuenta con piscina, zonas hxc3xbamedas, gym, salen de juegos, parque infantil, porterxc3xada, sala de espera etc.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-lisboa-2-habitaciones-2-banos-1-garajes/777-M2060731', 'Apartamento', '330000000', 'ALOJAR INMOBILIARIA S.A.', 'Bogotxc3xa1 D.C.', 'EL LIMONAR', '4.716963291168213', '-74.03605651855469', 'Calle 135C 11-33', '110121', '60', '60', '2020-08-08', '5', 'NULL', 'Entre 5 y 10 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"777-M2060731\", \"NOMBRE_COMUN_DEL_BARRIO\": \"EL LIMONAR\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"LISBOA\", \"ESTRATO\": \"5\", \"VALOR_DE_VENTA\": \"$330.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$285.000\", \"AREA_CONSTRUIDA\": \"60 m\", \"AREA_PRIVADA\": \"60 m\", \"HABITACIONES\": \"2\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Independiente\", \"PARQUEADERO_CUBIERTO\": \"Si\", \"DEPOSITOS\": \"1\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"TERRAZA/BALCON\": \"Balcon\", \"AREA_TERRAZA/BALCON\": \"5\", \"TIPO_DE_COCINA\": \"En L\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"VISTA\": \"Interior\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"NUMERO_DE_CLOSETS\": \"2\", \"TIPO_DE_ACABADO_PISO\": \"Laminado\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Laminado\", \"TIPO_DE_PISO_EN_COMEDOR\": \"Laminado\", \"TIPO_DE_PISO_EN_SALA\": \"Laminado\", \"EN_INTERIOR_Y/O_BLOQUE\": \"Si\", \"NUMERO_DE_PISO\": \"2\", 
\"NUMERO_DE_ASCENSORES\": \"1\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"ASCENSOR\": \"Si\", \"ZONA_DE_BBQ\": \"Si\", \"complementsAndFinishes\": [\"Piscina\", \"Gimnasio\", \"Sauna y/o turco\", \"Zonas verdes\", \"Salon comunal\", \"Zona para ninos\", \"Acceso para discapacitados\", \"Apto para ninos\", \"Sobre via secundaria\", \"Circuito cerrado de TV\", \"Citofonos\"]}', 'Carrera 7 # 82 - 66 Oficina 210', 'ALOJAR INMOBILIARIA S.A./ 2364752/ 3214910400/ Agente Inmobiliario:/ (1) 2364752/ inmobiliaria alojar s a/ Carrera 7 # 82 - 66 Oficina 210/ 2566683', '573214910400' ),( '815-M2500828', 'Venta de Casa en San simxc3xb3n guaymaral - Bogotxc3xa1 D.C. - 815-M2500828', 'Casa Hermosa de dos niveles, primer nivel sala de doble altura, estudio con salida al jardxc3xadn, cocina integral con despensa y cuarto de vajilla, comedor independiente de la sala con plancha de tepanyaki, al lado estxc3xa1 el comedor auxiliar, cava de vinos con temperatura controlada y amplia, segundo nivel, 5 habitaciones con baxc3xb1o y vestier, habitacixc3xb3n principal con dos vistieres, jacuzzi, solxc3xa1rium y turco, salida de terraza, segunda habitacixc3xb3n cuenta con salida a la terraza. La zona servicio es independiente a la casa, 1 piso tiene deposito, zona de lavanderxc3xada, un baxc3xb1o para el conductor, en el 2 piso se encuentra la habitacixc3xb3n para dos empleadas con baxc3xb1o y closets amplios, 4 garajes, zona de bicicletas, parqueadero de visitantes, en la terraza de la casa zona BBQ, un parasol para 8 personas, sistema de control de luces, sonido y cortinas. 
Paola Sandoval', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-casa-blanca-5-habitaciones-5-banos-4-garajes/815-M2500828', 'Casa', '3900000000', 'CARRIZOSA HERMANOS LTDA', 'Bogotxc3xa1 D.C.', 'SAN SIMxc3x93N GUAYMARAL', '4.8162946701049805', '-74.04507446289062', 'Calle 238 55-35', '111176', '650', '650', '2020-08-08', '6', '2', 'Entre 10 y 20 anos', '5', '5', '4', '{\"CODIGO_WEB\": \"815-M2500828\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SAN SIMON GUAYMARAL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CASA BLANCA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$3.900.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$900.000\", \"AREA_CONSTRUIDA\": \"650 m\", \"AREA_PRIVADA\": \"650 m\", \"AREA_DEL_LOTE\": \"1580 m\", \"HABITACIONES\": \"5 o mas\", \"BANOS\": \"5 o mas\", \"PARQUEADERO\": \"4 o mas\", \"PARQUEADERO_CUBIERTO\": \"Si\", \"DEPOSITOS\": \"2\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"TIPO_DE_CASA\": \"Condominio\", \"NUMERO_DE_NIVELES\": \"2\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Terraza\", \"AREA_TERRAZA/BALCON\": \"150\", \"TIPO_DE_COCINA\": \"Abierta\", \"TIPO_COMEDOR\": \"Comedor Independiente\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"VISTA\": \"Exterior\", \"CUARTO_DE_SERVICIO\": \"Si\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"CON_MUEBLES\": \"Si\", \"NUMERO_DE_CLOSETS\": \"2\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Madera\", \"TIPO_DE_PISO_EN_COMEDOR\": \"Madera\", \"TIPO_DE_PISO_EN_SALA\": \"Madera\", \"VIGILANCIA\": \"24hrs\", \"ZONA_DE_BBQ\": \"Si\", \"complementsAndFinishes\": [\"Comedor auxiliar\", \"Hall de Alcobas\", \"Jacuzzi\", \"Cuarto de escoltas\", \"Vista panoramica\", \"Con chimenea\", \"Apto para ninos\"]}', 'Cra 10 A # 69 A - 11', '5188777/ Alfonso Carrizosa Hermanos/ 3167228825/ Agente Inmobiliario:/ 3103048310/ Paola Sandoval/ carrera 10a #69a-11', 
'573167228825' ),( 'MC2465121', 'Venta de Casa en San jose spring - Bogotxc3xa1 D.C. - MC2465121', 'Casa unifamiliar Barrio Spring, a una cuadra de la autopista norte y estacixc3xb3n Alcalxc3xa1 de TM, Primer piso, Ante jardxc3xadn cubierto para garaje o local, garaje, baxc3xb1o, sala-comedor, cocina integral con deposito. Segundo piso, 4 alcobas, baxc3xb1o y sala de estar. Tercer piso, 2 alcobas, estudio y cuarto de ropas. Cerca a centros comerciales, parques y colegios, zona de gran desarrollo, Colina Campestre y Cedritos. Excelente ubicacixc3xb3n.', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-san-jose-del-prado-5-habitaciones-2-banos-1-garajes/MC2465121', 'Casa', '680000000', 'David David', 'Bogotxc3xa1 D.C.', 'SAN JOSE SPRING', '4.7219477', '-74.05317', 'Carrera 46 135-50', '111111', '237', '237', '2020-08-08', 'NULL', 'NULL', 'Mas de 20 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"MC2465121\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SAN JOSE SPRING\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SAN JOSE DEL PRADO\", \"VALOR_DE_VENTA\": \"$680.000.000\", \"NUMERO_DE_PISO\": \"3\", \"TIEMPO_DE_CONSTRUIDO\": \"Mas de 20 anos\", \"AREA_CONSTRUIDA\": \"237 m\", \"AREA_PRIVADA\": \"237 m\", \"HABITACIONES\": \"5 o mas\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Independiente\", \"CARACTERISTICAS_DEL_PARQUEADERO\": \"Cubierto\", \"complementsAndFinishes\": [\"Alarma\", \"Cocina integral\", \"Estudio o biblioteca\", \"Piso en baldosa\", \"Zona de lavanderia\", \"Cerca Centros Comerciales\", \"Cerca Colegios / Universidades\", \"Cerca Parques\", \"Cerca Supermercados\", \"Cerca Transporte Publico\", \"Zona Residencial\"]}', 'NULL', '3202745257/ 3158788930/ David David/ David David:/ 9289421', '573158788930' ),( '5175-1914', 'Venta de Apartamento en Los rosales - Bogotxc3xa1 D.C. 
- 5175-1914', 'Lindo apartamento de 45 Mts2, una alcoba, un baxc3xb1o, sala con chimenea, comedor, cocina integral abierta, vista exterior e iluminacixc3xb3n natural, piso en madera natural, el edificio cuenta con seguridad privada, ascensor, lavanderxc3xada.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-bellavista-1-habitaciones-1-banos-1-garajes/5175-1914', 'Apartamento', '380000000', 'BRIKSS SAS', 'Bogotxc3xa1 D.C.', 'Los Rosales', '4.656804680798692', '-74.0518902569973', 'Calle 75 4-7', '110221', '45', '45', '2020-08-08', '6', 'NULL', 'Entre 10 y 20 anos', '1', '1', '1', '{\"CODIGO_WEB\": \"5175-1914\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Los Rosales\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"BELLAVISTA\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$380.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$380.000\", \"AREA_CONSTRUIDA\": \"45 m\", \"AREA_PRIVADA\": \"45 m\", \"HABITACIONES\": \"1\", \"BANOS\": \"1\", \"PARQUEADERO\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 10 y 20 anos\", \"TERRAZA/BALCON\": \"Ninguno\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"VISTA\": \"Exterior\", \"TIPO_DE_ACABADO_PISO\": \"Otro\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Otro\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Otro\", \"NUMERO_DE_ASCENSORES\": \"1\", \"VIGILANCIA\": \"24hrs\", \"ASCENSOR\": \"Si\", \"complementsAndFinishes\": []}', 'CL 93B # 13 - 14 OF 307', 'CL 93B # 13 - 14 OF 307/ 4661864/ BRIKSS SAS - Principal:/ 3183757239/ BRIKSS SAS - Principal', '573183757239' ),( '633-211', 'Venta y arriendo de Casa en Nicolas de federman - Bogotxc3xa1 D.C. - 633-211', 'CASA LOTE DE TRECIENTOS DIEZ METROS CUADRADOS, EL LOTE TIENE UNA CASA PREFABRICADA DE DOS HABITACIONES, COCINA SEMIINTEGRAL, SALA COMEDOR, BAxc3x91O Y ZONA DE LAVANDERxc3x8dA. PARQUEADERO PARA UN CARRO. 
once X veintiocho.', 'https://www.metrocuadrado.com/inmueble/venta-casa-bogota-campin-occidental-2-habitaciones-1-banos-1-garajes/633-211', 'Casa', '1600000000', 'GRUPO INMOBILIARIO CRECER SAS', 'Bogotxc3xa1 D.C.', 'Nicolas De Federman', '4.649185761241317', '-74.0804885829666', 'Calle 58A 35A-41', '111321', '310.00', '310.00', '2020-08-08', '4', 'NULL', 'Entre 5 y 10 anos', '1', '1', '1', '{\"CODIGO_WEB\": \"633-211\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Nicolas De Federman\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"CAMPIN OCCIDENTAL\", \"ESTRATO\": \"4\", \"VALOR_DE_ARRIENDO\": \"$1.350.000\", \"VALOR_DE_VENTA\": \"$1.600.000.000\", \"AREA_CONSTRUIDA\": \"310.00 m\", \"AREA_PRIVADA\": \"310.00 m\", \"HABITACIONES\": \"2\", \"BANOS\": \"1\", \"PARQUEADERO\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"TIPO_DE_CASA\": \"Tradicional\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"VISTA\": \"Exterior\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Baldosa\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Baldosa\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"complementsAndFinishes\": [\"Sobre via secundaria\"]}', 'Cll 63 No 35 a 37', '3108696065/ grupo inmobiliario crecer - Principal:/ 3108576764/ Cll 63 No 35 a 37/ 3153200/ grupo inmobiliario crecer - Principal', 'NULL' ),( '887-M2614852', 'Venta de Apartamento en El cedrito - Bogotxc3xa1 D.C. - 887-M2614852', 'Bogota sector el Cedrito, Apartamento muy cxc3xb3modo, buenos acabados, iluminado y muy bien ubicado. 
', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-los-cedritos-3-habitaciones-2-banos-1-garajes/887-M2614852', 'Apartamento', '400000000', 'GUILLERMO DUQUE INMOBILIARIA', 'Bogotxc3xa1 D.C.', 'El CEDRITO', '4.730204105377197', '-74.04576110839844', 'Avenida Calle 147 19-30', '110131', '100', '100', '2020-08-08', '4', 'NULL', 'Entre 5 y 10 anos', '2', '2', '1', '{\"CODIGO_WEB\": \"887-M2614852\", \"NOMBRE_COMUN_DEL_BARRIO\": \"El CEDRITO\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"LOS CEDRITOS\", \"ESTRATO\": \"4\", \"VALOR_DE_VENTA\": \"$400.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$250.000\", \"AREA_CONSTRUIDA\": \"100 m\", \"AREA_PRIVADA\": \"100 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"2\", \"PARQUEADERO\": \"1\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"PARQUEADERO_CUBIERTO\": \"Si\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TIPO_DE_COCINA\": \"Paralela\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"VISTA\": \"Exterior\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"NUMERO_DE_CLOSETS\": \"3\", \"EN_INTERIOR_Y/O_BLOQUE\": \"Si\", \"NUMERO_DE_PISO\": \"1\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"complementsAndFinishes\": [\"Sobre via secundaria\"]}', 'Carrera 81 #4G - 65', '3008365673/ GUILLERMO DUQUE INMOBILIARIA:/ GUILLERMO DUQUE INMOBILIARIA/ (4) 2040744', '573008365673' ),( '847-4004', 'Venta y arriendo de Apartamento en Los rosales - Bogotxc3xa1 D.C. - 847-4004', 'En el exclusivo sector de Los Rosales, donde contarxc3xa1s con gran tranquilidad y acceso a excelentes vxc3xadas de movilizacixc3xb3n como la carrera sxc3xa9ptima, estamos ofreciendo para arriendo o venta este espectacular apartamento. 
Podrxc3xa1s ingresar a la propiedad a travxc3xa9s de un ascensor privado, y al hacerlo verxc3xa1s que hay aspectos que resaltan dentro del lugar, como su disexc3xb1o, sus espacios y lo luminosos que son. El xc3xa1rea social posee amplios ventanales de piso a techo que permiten una entrada total de luz nat', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-los-rosales-3-habitaciones-4-banos-3-garajes/847-4004', 'Apartamento', '3900000000', 'PADS', 'Bogotxc3xa1 D.C.', 'Los Rosales', '4.660006796513658', '-74.05030510233371', 'Carrera 5 78-95', '110221', '275', '0', '2020-08-08', '6', 'NULL', 'Entre 0 y 5 anos', '4', '4', '3', '{\"CODIGO_WEB\": \"847-4004\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Los Rosales\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"LOS ROSALES\", \"ESTRATO\": \"6\", \"VALOR_DE_ARRIENDO\": \"$17.000.000\", \"VALOR_DE_VENTA\": \"$3.900.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$2.000.000\", \"AREA_CONSTRUIDA\": \"275 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"4\", \"PARQUEADERO\": \"3\", \"TIPO_DE_PARQUEADERO\": \"Independiente\", \"CONJUNTO_CERRADO\": \"Si\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 0 y 5 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Balcon\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"VISTA\": \"Exterior\", \"CUARTO_DE_SERVICIO\": \"Si\", \"ZONA_DE_LAVANDERIA\": \"Si\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"TIPO_DE_PISO_EN_ESTUDIO\": \"Madera\", \"EN_INTERIOR_Y/O_BLOQUE\": \"Si\", \"NUMERO_DE_PISO\": \"6\", \"NUMERO_DE_ASCENSORES\": \"1\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_PARQUES\": \"Si\", \"CERCA_SUPERMERCADOS\": \"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"ASCENSOR\": \"Si\", \"complementsAndFinishes\": [\"Hall de Alcobas\", \"Cuarto de escoltas\", \"Con chimenea\", \"Piscina\", \"Gimnasio\", \"Salon comunal\", \"Zona para ninos\", \"Sobre via secundaria\", \"Citofonos\"]}', 'Calle 93B #11A-84 
piso 3', 'PADS:/ Calle 93B #11A-84 piso 3 CC Portobelo/ 2566701/ 3142007942/ PADS', '573142007942' ),( '681-M2194774', 'Venta de Apartamento en Santa ana oriental - Bogotxc3xa1 D.C. - 681-M2194774', 'Apartamento ubicado en el exclusivo sector de Santa Ana Oriental, el edifico esta rodeado de espectaculares casas, sector muy seguro . El apartamento cuenta con 3 alcobas, alcoba baxc3xb1o de servicio, estudio y terraza.', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-santa-ana-oriental-3-habitaciones-3-banos-2-garajes/681-M2194774', 'Apartamento', '850000000', 'PUNTO INMOBILIARIO - B.M. S.A.S', 'Bogotxc3xa1 D.C.', 'SANTA ANA ORIENTAL', '4.688530921936035', '-74.03411865234375', 'Calle 110 6-50', '110111', '121', '117', '2020-08-08', '6', 'NULL', 'Mas de 20 anos', '3', '3', '2', '{\"CODIGO_WEB\": \"681-M2194774\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SANTA ANA ORIENTAL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SANTA ANA ORIENTAL\", \"ESTRATO\": \"6\", \"VALOR_DE_VENTA\": \"$850.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$600.000\", \"AREA_CONSTRUIDA\": \"121 m\", \"AREA_PRIVADA\": \"117 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"3\", \"PARQUEADERO\": \"2\", \"TIPO_DE_PARQUEADERO\": \"Propio\", \"PARQUEADERO_CUBIERTO\": \"Si\", \"DEPOSITOS\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Mas de 20 anos\", \"ESTUDIO_O_BIBLIOTECA\": \"Si\", \"TERRAZA/BALCON\": \"Terraza\", \"AREA_TERRAZA/BALCON\": \"30\", \"TIPO_DE_COCINA\": \"Lineal\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_INSTALACION_DE_GAS\": \"Natural\", \"TIPO_DE_ESTUFA\": \"Gas\", \"TIPO_DE_CALENTADOR\": \"Gas\", \"VISTA\": \"Exterior\", \"CUARTO_DE_SERVICIO\": \"Si\", \"NUMERO_DE_CLOSETS\": \"3\", \"TIPO_DE_ACABADO_PISO\": \"Madera\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Madera\", \"TIPO_DE_PISO_EN_COMEDOR\": \"Madera\", \"TIPO_DE_PISO_EN_SALA\": \"Madera\", \"NUMERO_DE_PISO\": \"2\", \"NUMERO_DE_ASCENSORES\": \"1\", \"VIGILANCIA\": \"24hrs\", \"CERCA_TRANSPORTE_PUBLICO\": \"Si\", \"CERCA_COLEGIOS_/_UNIVERSIDADES\": 
\"Si\", \"CERCA_CENTROS_COMERCIALES\": \"Si\", \"ASCENSOR\": \"Si\", \"complementsAndFinishes\": [\"Con chimenea\", \"Citofonos\"]}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ (1) 6204300/ olga lucia rueda salazar/ 3124332588/ AV 15 # 124-65/ Agente Inmobiliario:/ 3112514804/ PUNTO INMOBILIARIO - B.M. S.A.S', '573112514804' ),( '11233-2377790', 'Venta de Bodega en San rafael - Bogotxc3xa1 D.C. - 11233-2377790', 'Bodega en lote esquinero de casi 147 mts2 12 de frente x 15 de fondo situada en la Carrera 53C con Calle 5B. Las vxc3xadas de acceso a la bodega son amplias y rxc3xa1pidas, muy cerca de Puente Aranda. Tiene una entrada para vehxc3xadculos medianos sobre la carrera. Estxc3xa1 situada en San Rafael, un barrio residencial e industrial por lo que puede ser destinada a bodegaje y o a actividades industriales limpias y de bajo ruido. Piso en concreto y sercha metalica en el techo. Incluye una pequexc3xb1a', 'https://www.metrocuadrado.com/inmueble/venta-bodega-bogota-san-rafael/11233-2377790', 'Bodega', '480000000', 'LOYAL CITY BIENES RAICES', 'Bogotxc3xa1 D.C.', 'SAN RAFAEL', '4.622081608857017', '-74.1125116798233', 'Carrera 53BBIS 5A-52', '111611', '147', '0', '2020-08-08', '3', 'NULL', 'Mas de 20 anos', '', '', '', '{\"CODIGO_WEB\": \"11233-2377790\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SAN RAFAEL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SAN RAFAEL\", \"ESTRATO\": \"3\", \"VALOR_DE_VENTA\": \"$480.000.000\", \"AREA_CONSTRUIDA\": \"147 m\", \"TIEMPO_DE_CONSTRUIDO\": \"Mas de 20 anos\", \"TIPO_BODEGA\": \"Otro\", \"TIPO_DE_ACABADO_PISO\": \"Otro\", \"complementsAndFinishes\": []}', 'CARRERA 67 # 46 -14', '3002660813/ LOYAL CITY BIENES RAICES:/ LOYAL CITY BIENES RAICES', '573118448126' ),( '2266-1972413', 'Venta de Apartamento en Santa barbara occidental - Bogotxc3xa1 D.C. - 2266-1972413', 'Vendo apartamento en piso 4, exterior en muy buen estado. El apto tiene 3 alcobas, 2 baxc3xb1os, sala comedor con chimenea. Cocina cerrada, cuarto y baxc3xb1o de servicio. 
Zona de lavanderxc3xada. Un Parqueadero cubierto. Los pisos del apto estxc3xa1n en madera laminada. Cocina y baxc3xb1os en buen estado de conservacixc3xb3n. Edificio con vigilancia 24 horas. En excelente estado, ladrillo a la vista. Linda recepcixc3xb3n, excelente ubicacion porque es residencial con bastante comercio por lo tanto se encuentra t', 'https://www.metrocuadrado.com/inmueble/venta-apartamento-bogota-santa-barbara-occidental-3-habitaciones-3-banos-1-garajes/2266-1972413', 'Apartamento', '425000000', 'Boutique de Bienes Raices', 'Bogotxc3xa1 D.C.', 'SANTA BARBARA OCCIDENTAL', '4.706132731731016', '-74.05043758211897', 'Avenida Calle 127 20-4', '110121', '93', '93', '2020-08-08', '5', 'NULL', 'Mas de 20 anos', '3', '3', '1', '{\"CODIGO_WEB\": \"2266-1972413\", \"NOMBRE_COMUN_DEL_BARRIO\": \"SANTA BARBARA OCCIDENTAL\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SANTA BARBARA OCCIDENTAL\", \"ESTRATO\": \"5\", \"VALOR_DE_VENTA\": \"$425.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$385.000\", \"AREA_CONSTRUIDA\": \"93 m\", \"AREA_PRIVADA\": \"93 m\", \"HABITACIONES\": \"3\", \"BANOS\": \"3\", \"PARQUEADERO\": \"1\", \"TIEMPO_DE_CONSTRUIDO\": \"Mas de 20 anos\", \"TIPO_COMEDOR\": \"Sala Comedor\", \"TIPO_DE_ACABADO_PISO\": \"Otro\", \"TIPO_DE_PISO_EN_ALCOBAS\": \"Otro\", \"NUMERO_DE_PISO\": \"4\", \"NUMERO_DE_ASCENSORES\": \"1\", \"ASCENSOR\": \"Si\", \"complementsAndFinishes\": [\"Comedor auxiliar\", \"Hall de Alcobas\", \"Con chimenea\", \"Citofonos\"]}', 'na', 'XIMENA FERNANDEZ/ 3138528340/ CL 0 # 0 - 0/ XIMENA FERNANDEZ:', '573138528340' ),( '681-458', 'Venta de Local Comercial en Puente aranda - Bogotxc3xa1 D.C. - 681-458', 'LOCAL UBICADO EN EL CENTRO COMERCIA OUTLET FACTORY, CENTRADO EN EL PRIMER PISO.LUGAR CONCURRIDO MUY BIEN UBICADO.', 'https://www.metrocuadrado.com/inmueble/venta-local-comercial-bogota-salazar-gomez/681-458', 'Local Comercial', '3460000000', 'PUNTO INMOBILIARIO - B.M. 
S.A.S', 'Bogotxc3xa1 D.C.', 'Puente Aranda', '4.628376343649677', '-74.11732013354063', 'Avenida Calle 9-0', '111611', '213.00', '213.00', '2020-08-08', '7', 'NULL', 'Entre 5 y 10 anos', '', '', '', '{\"CODIGO_WEB\": \"681-458\", \"NOMBRE_COMUN_DEL_BARRIO\": \"Puente Aranda\", \"NOMBRE_DEL_BARRIO_CATASTRAL\": \"SALAZAR GOMEZ\", \"ESTRATO\": \"7\", \"VALOR_DE_VENTA\": \"$3.460.000.000\", \"VALOR_DE_ADMINISTRACION\": \"$3.900.000\", \"AREA_CONSTRUIDA\": \"213.00 m\", \"AREA_PRIVADA\": \"213.00 m\", \"TIEMPO_DE_CONSTRUIDO\": \"Entre 5 y 10 anos\", \"TIPO_LOCAL\": \"Otro\", \"complementsAndFinishes\": []}', 'AVENIDA 15 No 124 -65 OF 704', '6204300/ 3124332588/ AV 15 # 124-65/ PUNTO INMOBILIARIO - B.M. S.A.S:/ PUNTO INMOBILIARIO - B.M. S.A.S', '573124332588' )"
# One-shot bulk-insert of the scraped listings: ``mybulk`` (the huge
# INSERT ... VALUES string built above) is left disabled here.
# NOTE(review): ``fetchall`` is called without a visible preceding
# ``execute`` in this snippet — it presumably consumes a query issued
# earlier in the script; confirm before re-enabling the line above.
#mycursor.execute(mybulk)
myresult = mycursor.fetchall()
#mycursor.execute("ALTER TABLE states ADD PRIMARY KEY (id_web)")
| 2,071.555556
| 43,125
| 0.694422
| 12,412
| 93,220
| 5.074041
| 0.082662
| 0.012385
| 0.01899
| 0.01753
| 0.994871
| 0.994871
| 0.994871
| 0.994871
| 0.994871
| 0.994871
| 0
| 0.105536
| 0.10206
| 93,220
| 44
| 43,126
| 2,118.636364
| 0.646847
| 0.488318
| 0
| 0
| 0
| 1.228571
| 0.640849
| 0.005748
| 0
| 0
| 0
| 0.022727
| 0
| 1
| 0
| false
| 0.028571
| 0.085714
| 0
| 0.085714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
37e03a2b78a94c424c74848e33459cc7a4920cc8
| 343
|
py
|
Python
|
tests/internal/instance_type/test_instance_type_d_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/instance_type/test_instance_type_d_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/instance_type/test_instance_type_d_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | 1
|
2021-12-15T11:58:22.000Z
|
2021-12-15T11:58:22.000Z
|
# Testing module instance_type.d
import pytest
import ec2_compare.internal.instance_type.d
def test_get_internal_data_instance_type_d_get_instances_list():
    """The generated 'd' instance-type module returns a non-empty instance list."""
    instances = ec2_compare.internal.instance_type.d.get_instances_list()
    assert len(instances) > 0
def test_get_internal_data_instance_type_d_get():
    """The module-level ``get`` container of the 'd' module is populated."""
    container = ec2_compare.internal.instance_type.d.get
    assert len(container) > 0
| 34.3
| 75
| 0.845481
| 56
| 343
| 4.732143
| 0.339286
| 0.271698
| 0.29434
| 0.241509
| 0.826415
| 0.826415
| 0.611321
| 0.611321
| 0.611321
| 0
| 0
| 0.015773
| 0.075802
| 343
| 9
| 76
| 38.111111
| 0.820189
| 0.087464
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
37f6822c952d4950cb8f56a716319fb9dcb24ea3
| 10,071
|
py
|
Python
|
src/openprocurement/tender/cfaua/views/bid_document.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 10
|
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/tender/cfaua/views/bid_document.py
|
quintagroup/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 26
|
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/tender/cfaua/views/bid_document.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 15
|
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
# -*- coding: utf-8 -*-
from openprocurement.api.utils import json_view
from openprocurement.api.validation import (
validate_file_update,
validate_patch_document_data,
validate_file_upload,
)
from openprocurement.tender.core.validation import (
validate_bid_document_operation_period,
unless_allowed_by_qualification_milestone,
validate_bid_document_operation_in_award_status,
)
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.cfaua.utils import (
bid_financial_documents_resource,
bid_eligibility_documents_resource,
bid_qualification_documents_resource,
)
from openprocurement.tender.openua.views.bid_document import TenderUaBidDocumentResource
from openprocurement.tender.openua.validation import (
validate_download_bid_document,
validate_update_bid_document_confidentiality,
)
from openprocurement.tender.openeu.validation import (
validate_view_bid_documents_allowed_in_tender_status,
validate_view_financial_bid_documents_allowed_in_tender_status,
validate_view_bid_documents_allowed_in_bid_status,
validate_view_financial_bid_documents_allowed_in_bid_status,
validate_bid_document_operation_in_bid_status,
)
from openprocurement.tender.cfaua.validation import (
validate_add_bid_document_not_in_allowed_tender_status,
validate_add_bid_financial_document_not_in_allowed_tender_status,
)
# @optendersresource(
# name="closeFrameworkAgreementUA:Tender Bid Documents",
# collection_path="/tenders/{tender_id}/bids/{bid_id}/documents",
# path="/tenders/{tender_id}/bids/{bid_id}/documents/{document_id}",
# procurementMethodType="closeFrameworkAgreementUA",
# description="Tender EU bidder documents",
# )
class TenderEUBidDocumentResource(TenderUaBidDocumentResource):
    """Bid documents for the closeFrameworkAgreementUA procedure.

    Extends the UA bid-document resource by wiring EU-specific
    validators (tender-status and bid-status visibility checks, and the
    qualification-milestone escape hatch) onto each view.  The route
    decorator is commented out above this class in the file.
    """

    # Name of the bid attribute this resource reads/writes documents in.
    container = "documents"

    @json_view(
        validators=(
            validate_view_bid_documents_allowed_in_tender_status,
            validate_view_bid_documents_allowed_in_bid_status,
        ),
        permission="view_tender"
    )
    def collection_get(self):
        """List bid documents; visibility gated by tender and bid status."""
        return super(TenderEUBidDocumentResource, self).collection_get()

    @json_view(
        validators=(
            validate_file_upload,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            validate_bid_document_operation_in_bid_status,
            # Upload is allowed outside the usual tender statuses when a
            # qualification milestone permits it.
            unless_allowed_by_qualification_milestone(
                validate_add_bid_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def collection_post(self):
        """Upload a new bid document (bid owner only)."""
        return super(TenderEUBidDocumentResource, self).collection_post()

    @json_view(
        validators=(
            validate_view_bid_documents_allowed_in_tender_status,
            validate_view_bid_documents_allowed_in_bid_status,
            validate_download_bid_document,
        ),
        permission="view_tender",
    )
    def get(self):
        """Retrieve a single bid document; download rights validated."""
        return super(TenderEUBidDocumentResource, self).get()

    @json_view(
        content_type="application/json",
        validators=(
            validate_patch_document_data,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            # Confidentiality may only change within the allowed rules.
            validate_update_bid_document_confidentiality,
            validate_bid_document_operation_in_bid_status,
            unless_allowed_by_qualification_milestone(
                validate_add_bid_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def patch(self):
        """Patch bid document metadata (JSON body)."""
        return super(TenderEUBidDocumentResource, self).patch()

    @json_view(
        validators=(
            validate_file_update,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            validate_update_bid_document_confidentiality,
            validate_bid_document_operation_in_bid_status,
            unless_allowed_by_qualification_milestone(
                validate_add_bid_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def put(self):
        """Replace a bid document's file content."""
        return super(TenderEUBidDocumentResource, self).put()
# @bid_eligibility_documents_resource(
# name="closeFrameworkAgreementUA:Tender Bid Eligibility Documents",
# collection_path="/tenders/{tender_id}/bids/{bid_id}/eligibility_documents",
# path="/tenders/{tender_id}/bids/{bid_id}/eligibility_documents/{document_id}",
# procurementMethodType="closeFrameworkAgreementUA",
# description="Tender EU bidder eligibility documents",
# )
class TenderEUBidEligibilityDocumentResource(TenderEUBidDocumentResource):
    """Bid eligibility documents.

    Identical to TenderEUBidDocumentResource except for the container
    attribute: documents are stored under ``eligibilityDocuments``.
    """

    # Bid attribute that stores documents handled by this resource.
    container = "eligibilityDocuments"
# @bid_financial_documents_resource(
# name="closeFrameworkAgreementUA:Tender Bid Financial Documents",
# collection_path="/tenders/{tender_id}/bids/{bid_id}/financial_documents",
# path="/tenders/{tender_id}/bids/{bid_id}/financial_documents/{document_id}",
# procurementMethodType="closeFrameworkAgreementUA",
# description="Tender EU bidder financial documents",
# )
class TenderEUBidFinancialDocumentResource(TenderEUBidDocumentResource):
    """Bid financial documents.

    Stores documents under ``financialDocuments`` and swaps in the
    financial-specific view/status validators (the
    ``validate_view_financial_*`` and
    ``validate_add_bid_financial_document_*`` variants) on every view.
    """

    # Bid attribute that stores documents handled by this resource.
    container = "financialDocuments"

    @json_view(
        validators=(
            validate_view_financial_bid_documents_allowed_in_tender_status,
            validate_view_financial_bid_documents_allowed_in_bid_status,
        ),
        permission="view_tender",
    )
    def collection_get(self):
        """List financial documents; financial visibility rules apply."""
        return super(TenderEUBidFinancialDocumentResource, self).collection_get()

    @json_view(
        validators=(
            validate_view_financial_bid_documents_allowed_in_tender_status,
            validate_view_financial_bid_documents_allowed_in_bid_status,
            validate_download_bid_document,
        ),
        permission="view_tender",
    )
    def get(self):
        """Retrieve a single financial document; download rights validated."""
        return super(TenderEUBidFinancialDocumentResource, self).get()

    @json_view(
        validators=(
            validate_file_upload,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            validate_bid_document_operation_in_bid_status,
            # Financial variant of the tender-status check, still
            # escapable via a qualification milestone.
            unless_allowed_by_qualification_milestone(
                validate_add_bid_financial_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def collection_post(self):
        """Upload a new financial document (bid owner only)."""
        return super(TenderEUBidFinancialDocumentResource, self).collection_post()

    @json_view(
        content_type="application/json",
        validators=(
            validate_patch_document_data,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            validate_update_bid_document_confidentiality,
            validate_bid_document_operation_in_bid_status,
            unless_allowed_by_qualification_milestone(
                validate_add_bid_financial_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def patch(self):
        """Patch financial document metadata (JSON body)."""
        return super(TenderEUBidFinancialDocumentResource, self).patch()

    @json_view(
        validators=(
            validate_file_update,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            validate_update_bid_document_confidentiality,
            validate_bid_document_operation_in_bid_status,
            unless_allowed_by_qualification_milestone(
                validate_add_bid_financial_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def put(self):
        """Replace a financial document's file content."""
        return super(TenderEUBidFinancialDocumentResource, self).put()
# @bid_qualification_documents_resource(
# name="closeFrameworkAgreementUA:Tender Bid Qualification Documents",
# collection_path="/tenders/{tender_id}/bids/{bid_id}/qualification_documents",
# path="/tenders/{tender_id}/bids/{bid_id}/qualification_documents/{document_id}",
# procurementMethodType="closeFrameworkAgreementUA",
# description="Tender EU bidder qualification documents",
# )
class TenderEUBidQualificationDocumentResource(TenderEUBidFinancialDocumentResource):
    """Bid qualification documents.

    Stores documents under ``qualificationDocuments``.  Inherits the
    financial GET views, but its write views use the plain (non
    "financial") ``validate_add_bid_document_not_in_allowed_tender_status``
    validator.

    NOTE(review): every ``super()`` call below is anchored at
    TenderEUBidFinancialDocumentResource, i.e. it deliberately(?) skips
    the financial class's own overrides and dispatches straight to
    TenderEUBidDocumentResource.  This looks intentional — the
    decorators here re-declare the non-financial validators — but
    confirm before "fixing" the anchor class.
    """

    # Bid attribute that stores documents handled by this resource.
    container = "qualificationDocuments"

    @json_view(
        validators=(
            validate_file_upload,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            validate_bid_document_operation_in_bid_status,
            unless_allowed_by_qualification_milestone(
                validate_add_bid_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def collection_post(self):
        """Upload a new qualification document (bid owner only)."""
        return super(TenderEUBidFinancialDocumentResource, self).collection_post()

    @json_view(
        content_type="application/json",
        validators=(
            validate_patch_document_data,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            validate_update_bid_document_confidentiality,
            validate_bid_document_operation_in_bid_status,
            unless_allowed_by_qualification_milestone(
                validate_add_bid_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def patch(self):
        """Patch qualification document metadata (JSON body)."""
        return super(TenderEUBidFinancialDocumentResource, self).patch()

    @json_view(
        validators=(
            validate_file_update,
            validate_bid_document_operation_period,
            validate_bid_document_operation_in_award_status,
            validate_update_bid_document_confidentiality,
            validate_bid_document_operation_in_bid_status,
            unless_allowed_by_qualification_milestone(
                validate_add_bid_document_not_in_allowed_tender_status
            ),
        ),
        permission="edit_bid",
    )
    def put(self):
        """Replace a qualification document's file content."""
        return super(TenderEUBidFinancialDocumentResource, self).put()
| 38.003774
| 88
| 0.722173
| 978
| 10,071
| 6.888548
| 0.079755
| 0.078373
| 0.084607
| 0.124685
| 0.82366
| 0.798575
| 0.742615
| 0.731928
| 0.721835
| 0.607392
| 0
| 0.000126
| 0.213286
| 10,071
| 264
| 89
| 38.147727
| 0.850183
| 0.148943
| 0
| 0.766355
| 0
| 0
| 0.02728
| 0.002576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060748
| false
| 0
| 0.042056
| 0.060748
| 0.200935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
533a76d3ceae3008ff3d6996e810eb87ecb65e10
| 16,210
|
py
|
Python
|
test/oscprob2nu_plot.py
|
mbustama/NuOscProbExact
|
76c816e75a995fbeeb98b0249a7b9e03769aa12b
|
[
"MIT"
] | 14
|
2019-04-30T01:40:45.000Z
|
2022-03-23T05:00:13.000Z
|
test/oscprob2nu_plot.py
|
mbustama/NuOscProbExact
|
76c816e75a995fbeeb98b0249a7b9e03769aa12b
|
[
"MIT"
] | 1
|
2022-03-11T14:33:03.000Z
|
2022-03-11T14:33:03.000Z
|
test/oscprob2nu_plot.py
|
mbustama/NuOscProbExact
|
76c816e75a995fbeeb98b0249a7b9e03769aa12b
|
[
"MIT"
] | 9
|
2019-04-30T16:47:25.000Z
|
2022-03-23T05:00:19.000Z
|
# -*- coding: utf-8 -*-
r"""Routines to plot two-neutrino flavor-transition probabilities.
This module contains contains routines to plot two-neutrino
oscillation probabilities vs. the neutrino baseline and energy. These
routines are used by run_testsuite.py to produce a suite of test plots.
Routine listings
----------------
* plot_probability_2nu_vs_baseline - Plot probabilities vs. baseline
* plot_probability_2nu_vs_energy - Plot probabilities vs. energy
Created: 2019/04/22 18:35
Last modified: 2019/04/22 19:31
"""
__version__ = "1.0"
__author__ = "Mauricio Bustamante"
__email__ = "mbustamante@gmail.com"
from numpy import *
import numpy as np
from pylab import *
from matplotlib import *
import matplotlib as mpl
import sys
sys.path.append('../src')
sys.path.append('../test')
import oscprob2nu
import hamiltonians2nu
from globaldefs import *
def plot_probability_2nu_vs_baseline(
        case, sector, energy=1.e-1,
        log10_l_min=0.0, log10_l_max=3.0, log10_l_npts=6000,
        plot_prob_ee=True, plot_prob_em=True, plot_prob_mm=False,
        output_filename='prob_vs_baseline', output_format='pdf',
        output_path='../fig/', legend_loc='center left',
        legend_ncol=1):
    r"""Generates and saves a plot of 2nu probabilities vs. baseline.

    Generates a plot of two-neutrino oscillation probabilities vs.
    baseline, for a fixed neutrino energy.  The probabilities to be
    plotted are turned on and off via the flags plot_prob_ee,
    plot_prob_em, plot_prob_mm.  (At least one of them must be True.)
    The parameter 'case' selects between 'vacuum', 'matter', 'nsi', and
    'liv' (see below).  The plot is saved with the provided name and
    file format under the specified path.

    Parameters
    ----------
    case : str
        Not optional.  Must be one of the following: 'vacuum', 'matter',
        'nsi', or 'liv'.  In each case, the probabilities are computed
        using the default parameter values pulled from globaldefs.
    sector : str
        Not optional.  Must be one of the following: '12' (for nu_e <-->
        nu_mu oscillations) or '23' (for nu_mu <--> nu_tau
        oscillations).
    energy : float, optional
        Neutrino energy [GeV].
    log10_l_min : float, optional
        Log10 of the minimum baseline [km].
    log10_l_max : float, optional
        Log10 of the maximum baseline [km].
    log10_l_npts : int, optional
        Number of baseline values at which to compute the probabilities.
    plot_prob_ee : bool, optional
        True to plot Pee (if sector == '12') or Pmm (if sector == '23'),
        False otherwise.
    plot_prob_em : bool, optional
        True to plot Pem (if sector == '12') or Pmt (if sector == '23'),
        False otherwise.
    plot_prob_mm : bool, optional
        True to plot Pmm (if sector == '12') or Ptt (if sector == '23'),
        False otherwise.
    output_filename : str, optional
        File name of plot to save (without the file extension).
    output_format : str, optional
        File extension of the plot to save (e.g., 'pdf', 'png', 'jpg').
    output_path : str, optional
        File path where to save the plot.
    legend_loc : str, optional
        Location of the legend in the plot.  Must be one of the allowed
        values of the plot routine of matplotlib.
    legend_ncol : int, optional
        Number of columns to include in the legend box.  Must be at
        least 1.

    Returns
    -------
    None
        The plot is generated and saved.

    Raises
    ------
    ValueError
        If 'sector' or 'case' is not one of the allowed strings.
    """
    # Bug fix: the original guard also tested ``plot_prob_me``, a name
    # that does not exist in the signature, so disabling the first two
    # flags raised NameError instead of exiting cleanly.
    if not (plot_prob_ee or plot_prob_em or plot_prob_mm):
        quit()

    # Baselines L [km], sampled uniformly in log10 space.
    log10_l_val = np.linspace(log10_l_min, log10_l_max, log10_l_npts)
    l_val = [10.**x for x in log10_l_val]

    # Mixing parameters and curve labels/colors for the chosen sector.
    # The best-fit constants (S12_NO_BF, D21_NO_BF, ...) come from
    # globaldefs (star-imported at the top of the file).
    if sector == '12':
        sth = S12_NO_BF
        Dm2 = D21_NO_BF
        label_ee = r'$P_{\nu_e \to \nu_e}$'
        label_em = r'$P_{\nu_e \to \nu_\mu}$'
        label_mm = r'$P_{\nu_\mu \to \nu_\mu}$'
        color_ee = 'C0'
        color_em = 'C1'
        color_mm = 'C4'
    elif sector == '23':
        sth = S23_NO_BF
        Dm2 = D31_NO_BF
        label_ee = r'$P_{\nu_\mu \to \nu_\mu}$'
        label_em = r'$P_{\nu_\mu \to \nu_\tau}$'
        label_mm = r'$P_{\nu_\tau \to \nu_\tau}$'
        color_ee = 'C4'
        color_em = 'C5'
        color_mm = 'C8'
    else:
        # Previously an unknown sector fell through and crashed later
        # with NameError on ``sth``; fail early and explicitly instead.
        raise ValueError("sector must be '12' or '23', got %r" % (sector,))

    h_vacuum_energy_independent = \
        hamiltonians2nu.hamiltonian_2nu_vacuum_energy_independent(sth, Dm2)

    # Build the full two-neutrino Hamiltonian for the requested physics
    # case; the energy is converted from GeV to eV (factor 1.e9).
    if (case.lower() == 'vacuum'):
        hamiltonian = np.multiply(1./energy/1.e9, h_vacuum_energy_independent)
        label_case = r'Vacuum'
    elif (case.lower() == 'matter'):
        hamiltonian = hamiltonians2nu.hamiltonian_2nu_matter(
            h_vacuum_energy_independent,
            energy*1.e9,
            VCC_EARTH_CRUST)
        label_case = r'Matter'
    elif (case.lower() == 'nsi'):
        hamiltonian = hamiltonians2nu.hamiltonian_2nu_nsi(
            h_vacuum_energy_independent,
            energy*1.e9,
            VCC_EARTH_CRUST,
            EPS_2)
        label_case = r'NSI'
    elif (case.lower() == 'liv'):
        hamiltonian = hamiltonians2nu.hamiltonian_2nu_liv(
            h_vacuum_energy_independent,
            energy*1.e9,
            SXI12,
            B1, B3, LAMBDA)
        label_case = r'CPT-odd LIV'
    else:
        # Same early-failure treatment as for ``sector``.
        raise ValueError(
            "case must be 'vacuum', 'matter', 'nsi', or 'liv', got %r"
            % (case,))

    # NOTE(review): indices 0, 1, 3 are read below, so
    # oscprob2nu.probabilities_2nu presumably returns
    # [Pee, Pem, Pme, Pmm] — confirm against oscprob2nu.
    prob = [oscprob2nu.probabilities_2nu(hamiltonian,
                                         l*CONV_KM_TO_INV_EV)
            for l in l_val]
    prob_ee = [x[0] for x in prob]
    prob_em = [x[1] for x in prob]
    prob_mm = [x[3] for x in prob]

    # Formatting
    mpl.rcParams['xtick.labelsize'] = 26
    mpl.rcParams['ytick.labelsize'] = 26
    mpl.rcParams['legend.fontsize'] = 26
    mpl.rcParams['legend.borderpad'] = 0.4
    mpl.rcParams['axes.labelpad'] = 10
    mpl.rcParams['ps.fonttype'] = 42
    mpl.rcParams['pdf.fonttype'] = 42

    fig = plt.figure(figsize=[9, 9])
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlabel(r'Baseline $L$ [km]', fontsize=25)
    ax.set_ylabel(r'Two-neutrino probability', fontsize=25)
    yaxis_minor_locator = mpl.ticker.MultipleLocator(0.1)
    ax.yaxis.set_minor_locator(yaxis_minor_locator)
    ax.tick_params('both', length=10, width=2, which='major')
    ax.tick_params('both', length=5, width=1, which='minor')
    ax.tick_params(axis='both', which='major', pad=10, direction='in')
    ax.tick_params(axis='both', which='minor', pad=10, direction='in')
    ax.tick_params(axis='x', which='minor', bottom=True)
    ax.tick_params(axis='x', which='minor', top=True)
    ax.tick_params(axis='y', which='minor', left=True)
    ax.tick_params(axis='y', which='minor', right=True)
    ax.tick_params(bottom=True, top=True, left=True, right=True)
    ax.set_xlim([10.**log10_l_min, 10.**log10_l_max])
    ax.set_xscale('log')
    ax.set_ylim([0.0, 1.0])

    # Plot only the requested probability curves.
    if (plot_prob_ee):
        ax.plot(l_val, prob_ee, label=label_ee,
                color=color_ee, zorder=1)
    if (plot_prob_em):
        ax.plot(l_val, prob_em, label=label_em,
                color=color_em, zorder=1)
    if (plot_prob_mm):
        ax.plot(l_val, prob_mm, label=label_mm,
                color=color_mm, zorder=1)

    # Legend
    ax.legend(loc=legend_loc, frameon=False, ncol=legend_ncol)

    # Annotate the physics case and the fixed energy (log10 E, rounded
    # to two decimals via the int(...*100.)/100. trick).
    ax.annotate(label_case, xy=(0.05, 0.86),
                xycoords='axes fraction', color='k', fontsize=25,
                horizontalalignment='left', rotation=0, zorder=2)
    ax.annotate(r'$\log_{10}(E/{\rm GeV}) = $' +
                str(int(log10(energy)*100.)/100.),
                xy=(0.05, 0.80), xycoords='axes fraction', color='k',
                fontsize=25,
                horizontalalignment='left', rotation=0, zorder=2)

    # Save through the local Figure object.  The original called
    # ``pylab.savefig``, but ``pylab`` may not be bound by the star
    # imports at the top of the file; ``fig.savefig`` writes the same
    # figure and is self-contained.
    fig.savefig(output_path+output_filename+'.'+output_format,
                bbox_inches='tight', dpi=100)
    plt.close()

    return
def plot_probability_2nu_vs_energy(
        case, sector, baseline=1.3e3,
        log10_energy_min=-1.0, log10_energy_max=1.0,
        log10_energy_npts=200,
        plot_prob_ee=True, plot_prob_em=True, plot_prob_mm=False,
        output_filename='prob_vs_energy', output_format='pdf',
        output_path='../fig/', legend_loc='center right',
        legend_ncol=1):
    r"""Generates and saves a plot of 2nu probabilities vs. energy.

    Generates a plot of two-neutrino oscillation probabilities vs.
    energy, for a fixed neutrino baseline.  The probabilities to be
    plotted are turned on and off via the flags plot_prob_ee,
    plot_prob_em, etc.  (At least one of them must be True.)  The
    parameter 'case' selects between 'vacuum', 'matter', 'nsi', and
    'liv' (see below).  The plot is saved with the provided name and
    file format under the specified path.

    Parameters
    ----------
    case : str
        Not optional. Must be one of the following: 'vacuum', 'matter',
        'nsi', or 'liv'. In each case, the probabilities are computed
        using the default parameter values pulled from globaldefs.
    sector : str
        Not optional. Must be one of the following: '12' (for nu_e <-->
        nu_mu oscillations) or '23' (for nu_mu <--> nu_tau
        oscillations).
    baseline : float, optional
        Neutrino baseline [km].
    log10_energy_min : float, optional
        Log10 of the minimum energy [GeV].
    log10_energy_max : float, optional
        Log10 of the maximum energy [GeV].
    log10_energy_npts : int, optional
        Number of energy values at which to compute the probabilities.
    plot_prob_ee : bool, optional
        True to plot Pee (if sector == '12') or Pmm (if sector == '23'),
        False otherwise.
    plot_prob_em : bool, optional
        True to plot Pem (if sector == '12') or Pmt (if sector == '23'),
        False otherwise.
    plot_prob_mm : bool, optional
        True to plot Pmm (if sector == '12') or Ptt (if sector == '23'),
        False otherwise.
    output_filename : str, optional
        File name of plot to save (without the file extension).
    output_format : str, optional
        File extension of the plot to save (e.g., 'pdf', 'png', 'jpg').
    output_path : str, optional
        File path where to save the plot.
    legend_loc : str, optional
        Location of the legend in the plot. Must be one of the allowed
        values of the plot routine of matplotlib.
    legend_ncol : int, optional
        Number of columns to include in the legend box. Must be at
        least 1.

    Returns
    -------
    None
        The plot is generated and saved.

    Raises
    ------
    ValueError
        If all probability flags are False, or if 'case' or 'sector' is
        not one of the allowed values.
    """
    # At least one curve must be requested.  (The original code tested an
    # undefined flag, plot_prob_me, which raised a NameError, and called
    # quit(), which kills the interpreter instead of signaling the error.)
    if (not plot_prob_ee) and (not plot_prob_em) and (not plot_prob_mm):
        raise ValueError('At least one of plot_prob_ee, plot_prob_em, '
                         'plot_prob_mm must be True')

    baseline = baseline*CONV_KM_TO_INV_EV  # [km] --> [eV^{-1}]

    # Neutrino energies, log-spaced [GeV]
    log10_energy_val = np.linspace(log10_energy_min, log10_energy_max,
                                   log10_energy_npts)
    energy_val = [10.**x for x in log10_energy_val]

    # Mixing parameters and curve labels/colors for the chosen sector
    if sector == '12':
        sth = S12_NO_BF
        Dm2 = D21_NO_BF
        label_ee = r'$P_{\nu_e \to \nu_e}$'
        label_em = r'$P_{\nu_e \to \nu_\mu}$'
        label_mm = r'$P_{\nu_\mu \to \nu_\mu}$'
        color_ee = 'C0'
        color_em = 'C1'
        color_mm = 'C4'
    elif sector == '23':
        sth = S23_NO_BF
        Dm2 = D31_NO_BF
        label_ee = r'$P_{\nu_\mu \to \nu_\mu}$'
        label_em = r'$P_{\nu_\mu \to \nu_\tau}$'
        label_mm = r'$P_{\nu_\tau \to \nu_\tau}$'
        color_ee = 'C4'
        color_em = 'C5'
        color_mm = 'C8'
    else:
        # Previously an unknown sector fell through silently and caused a
        # NameError on sth/Dm2 further down; fail loudly instead.
        raise ValueError("sector must be '12' or '23'; got " + str(sector))

    # Energy-independent part of the Hamiltonian; the 1/E factor is
    # applied per energy below (energies converted from GeV to eV).
    h_vacuum_energy_independent = \
        hamiltonians2nu.hamiltonian_2nu_vacuum_energy_independent(sth, Dm2)

    if (case.lower() == 'vacuum'):
        prob = [oscprob2nu.probabilities_2nu(
                    np.multiply(1./energy/1.e9, h_vacuum_energy_independent),
                    baseline)
                for energy in energy_val]
        label_case = r'Vacuum'
    elif (case.lower() == 'matter'):
        prob = [oscprob2nu.probabilities_2nu(
                    hamiltonians2nu.hamiltonian_2nu_matter(
                        h_vacuum_energy_independent,
                        energy*1.e9,
                        VCC_EARTH_CRUST),
                    baseline)
                for energy in energy_val]
        label_case = r'Matter'
    elif (case.lower() == 'nsi'):
        prob = [oscprob2nu.probabilities_2nu(
                    hamiltonians2nu.hamiltonian_2nu_nsi(
                        h_vacuum_energy_independent,
                        energy*1.e9,
                        VCC_EARTH_CRUST,
                        EPS_2),
                    baseline)
                for energy in energy_val]
        label_case = r'NSI'
    elif (case.lower() == 'liv'):
        prob = [oscprob2nu.probabilities_2nu(
                    hamiltonians2nu.hamiltonian_2nu_liv(
                        h_vacuum_energy_independent,
                        energy*1.e9,
                        SXI12,
                        B1, B3, LAMBDA),
                    baseline)
                for energy in energy_val]
        label_case = r'CPT-odd LIV'
    else:
        # Previously an unknown case left `prob` undefined and caused a
        # NameError below; fail loudly instead.
        raise ValueError("case must be 'vacuum', 'matter', 'nsi', or "
                         "'liv'; got " + str(case))

    # Each element of prob holds at least four entries; indices 0, 1, 3
    # are used here as Pee, Pem, Pmm (index 2 -- presumably Pme -- is
    # unused).  NOTE(review): the original comment listed only three
    # entries, which contradicted the x[3] access below; confirm against
    # oscprob2nu.probabilities_2nu.
    prob_ee = [x[0] for x in prob]
    prob_em = [x[1] for x in prob]
    prob_mm = [x[3] for x in prob]

    # Formatting
    mpl.rcParams['xtick.labelsize'] = 26
    mpl.rcParams['ytick.labelsize'] = 26
    mpl.rcParams['legend.fontsize'] = 26
    mpl.rcParams['legend.borderpad'] = 0.4
    mpl.rcParams['axes.labelpad'] = 10
    # Fonttype 42 embeds TrueType fonts (editable text) in ps/pdf output
    mpl.rcParams['ps.fonttype'] = 42
    mpl.rcParams['pdf.fonttype'] = 42

    fig = plt.figure(figsize=[9, 9])
    ax = fig.add_subplot(1, 1, 1)

    ax.set_xlabel(r'Neutrino energy $E$ [GeV]', fontsize=25)
    ax.set_ylabel(r'Two-neutrino probability', fontsize=25)

    yaxis_minor_locator = mpl.ticker.MultipleLocator(0.1)
    ax.yaxis.set_minor_locator(yaxis_minor_locator)

    # Inward ticks on all four sides, major and minor
    ax.tick_params('both', length=10, width=2, which='major')
    ax.tick_params('both', length=5, width=1, which='minor')
    ax.tick_params(axis='both', which='major', pad=10, direction='in')
    ax.tick_params(axis='both', which='minor', pad=10, direction='in')
    ax.tick_params(axis='x', which='minor', bottom=True)
    ax.tick_params(axis='x', which='minor', top=True)
    ax.tick_params(axis='y', which='minor', left=True)
    ax.tick_params(axis='y', which='minor', right=True)
    ax.tick_params(bottom=True, top=True, left=True, right=True)

    ax.set_xlim([10.**log10_energy_min, 10.**log10_energy_max])
    ax.set_xscale('log')
    ax.set_ylim([0.0, 1.0])

    # Plot
    if (plot_prob_ee):
        ax.plot(energy_val, prob_ee, label=label_ee,
                color=color_ee, zorder=1)
    if (plot_prob_em):
        ax.plot(energy_val, prob_em, label=label_em,
                color=color_em, zorder=1)
    if (plot_prob_mm):
        ax.plot(energy_val, prob_mm, label=label_mm,
                color=color_mm, zorder=1)

    # Legend
    ax.legend(loc=legend_loc, frameon=False, ncol=legend_ncol)

    # Annotations: physics case, and the baseline (converted back to km
    # and truncated -- not rounded -- to two decimal places).
    ax.annotate(label_case, xy=(0.05, 0.86),
                xycoords='axes fraction', color='k', fontsize=25,
                horizontalalignment='left', rotation=0, zorder=2)
    ax.annotate(r'$\log_{10}(L/{\rm km}) = $' +
                str(int(log10(baseline/CONV_KM_TO_INV_EV)*100.)/100.),
                xy=(0.05, 0.80), xycoords='axes fraction', color='k',
                fontsize=25, horizontalalignment='left', rotation=0, zorder=2)

    pylab.savefig(output_path+output_filename+'.'+output_format,
                  bbox_inches='tight', dpi=100)
    plt.close()

    return
| 36.924829
| 78
| 0.585811
| 2,173
| 16,210
| 4.172572
| 0.139439
| 0.02647
| 0.023823
| 0.021176
| 0.856513
| 0.826734
| 0.822102
| 0.793868
| 0.776001
| 0.739164
| 0
| 0.036453
| 0.299383
| 16,210
| 438
| 79
| 37.009132
| 0.761909
| 0.300617
| 0
| 0.73251
| 0
| 0
| 0.100762
| 0.001927
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00823
| false
| 0
| 0.037037
| 0
| 0.053498
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7255312d460f227a54f4e612bb979a5a49186ec4
| 531,508
|
py
|
Python
|
test/unit/test_catalog_management_v1.py
|
Bhaskers-Blu-Org1/platform-services-python-sdk
|
e634a6930f00ba884c75eb600675d1ad42f0fc59
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_catalog_management_v1.py
|
Bhaskers-Blu-Org1/platform-services-python-sdk
|
e634a6930f00ba884c75eb600675d1ad42f0fc59
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_catalog_management_v1.py
|
Bhaskers-Blu-Org1/platform-services-python-sdk
|
e634a6930f00ba884c75eb600675d1ad42f0fc59
|
[
"Apache-2.0"
] | 1
|
2020-07-30T10:27:14.000Z
|
2020-07-30T10:27:14.000Z
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timezone
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import re
import requests
import responses
from ibm_platform_services.catalog_management_v1 import *
# Module-level client shared by every test class below.
# NoAuthAuthenticator skips real IAM authentication; all HTTP traffic is
# intercepted by the `responses` mocks registered per test.
service = CatalogManagementV1(
    authenticator=NoAuthAuthenticator()
)

# Base URL under which each test registers its mock endpoint.
base_url = 'https://cm.globalcatalog.cloud.ibm.com/api/v1-beta'
service.set_service_url(base_url)
##############################################################################
# Start of Service: Account
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for get_catalog_account
#-----------------------------------------------------------------------------
class TestGetCatalogAccount():
    """Unit tests for the get_catalog_account operation."""

    def preprocess_url(self, request_url: str):
        """Return the URL (or a regex) to register the mock under.

        URLs that end in one or more slashes are turned into a regex so
        the mock matches whether or not the client appends a trailing
        slash; all other URLs are registered verbatim.
        """
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_catalog_account_all_params(self):
        """Exercise get_catalog_account() against a mocked 200 response."""
        # Register the mock endpoint and payload.
        endpoint = self.preprocess_url(base_url + '/catalogaccount')
        payload = '{"id": "id", "account_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}}'
        responses.add(responses.GET,
                      endpoint,
                      body=payload,
                      content_type='application/json',
                      status=200)

        # Invoke the operation.
        result = service.get_catalog_account()

        # Exactly one HTTP call was made and it returned 200.
        assert len(responses.calls) == 1
        assert result.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for update_catalog_account
#-----------------------------------------------------------------------------
class TestUpdateCatalogAccount():
    """Unit tests for the update_catalog_account operation."""

    def preprocess_url(self, request_url: str):
        """Return the URL (or a regex) to register the mock under.

        URLs that end in one or more slashes are turned into a regex so
        the mock matches with or without a trailing slash.
        """
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_update_catalog_account_all_params(self):
        """Exercise update_catalog_account() with all parameters set."""
        # Register the mock endpoint (no response body needed for PUT).
        endpoint = self.preprocess_url(base_url + '/catalogaccount')
        responses.add(responses.PUT,
                      endpoint,
                      status=200)

        # Dict representations of the request models.
        filter_terms_model = {'filter_terms': ['testString']}
        # CategoryFilter model (built for parity with the other tests;
        # not referenced by the request payload below).
        category_filter_model = {'include': True,
                                 'filter': filter_terms_model}
        id_filter_model = {'include': filter_terms_model,
                           'exclude': filter_terms_model}
        filters_model = {'include_all': True,
                         'category_filters': {},
                         'id_filters': id_filter_model}

        # Invoke the operation.
        result = service.update_catalog_account(
            id='testString',
            account_filters=filters_model,
            headers={}
        )

        # Exactly one HTTP call was made and it returned 200.
        assert len(responses.calls) == 1
        assert result.status_code == 200

        # The request body carries the parameters verbatim.
        sent_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert sent_body['id'] == 'testString'
        assert sent_body['account_filters'] == filters_model

    @responses.activate
    def test_update_catalog_account_required_params(self):
        """Exercise update_catalog_account() with no optional params."""
        endpoint = self.preprocess_url(base_url + '/catalogaccount')
        responses.add(responses.PUT,
                      endpoint,
                      status=200)

        result = service.update_catalog_account()

        assert len(responses.calls) == 1
        assert result.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for get_catalog_account_filters
#-----------------------------------------------------------------------------
class TestGetCatalogAccountFilters():
    """Unit tests for the get_catalog_account_filters operation."""

    # Preprocess the request URL to ensure the mock response will be found.
    def preprocess_url(self, request_url: str):
        # URLs ending in '/' are registered as a regex so the mock matches
        # whether or not the client appends a trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_catalog_account_filters()
    #--------------------------------------------------------
    @responses.activate
    def test_get_catalog_account_filters_all_params(self):
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogaccount/filters')
        mock_response = '{"account_filters": [{"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}], "catalog_filters": [{"catalog": {"id": "id", "name": "name"}, "filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        catalog = 'testString'

        # Invoke method
        response = service.get_catalog_account_filters(
            catalog=catalog,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Validate query params: the `catalog` value must appear in the
        # (URL-decoded) query string of the single recorded request.
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'catalog={}'.format(catalog) in query_string

    #--------------------------------------------------------
    # test_get_catalog_account_filters_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_get_catalog_account_filters_required_params(self):
        # Set up mock (same payload as above; no query parameter sent)
        url = self.preprocess_url(base_url + '/catalogaccount/filters')
        mock_response = '{"account_filters": [{"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}], "catalog_filters": [{"catalog": {"id": "id", "name": "name"}, "filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Invoke method
        response = service.get_catalog_account_filters()

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
# endregion
##############################################################################
# End of Service: Account
##############################################################################
##############################################################################
# Start of Service: Catalogs
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_catalogs
#-----------------------------------------------------------------------------
class TestListCatalogs():
    """Unit tests for the list_catalogs operation."""

    # Preprocess the request URL to ensure the mock response will be found.
    def preprocess_url(self, request_url: str):
        # URLs ending in '/' are registered as a regex so the mock matches
        # whether or not the client appends a trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # list_catalogs()
    #--------------------------------------------------------
    @responses.activate
    def test_list_catalogs_all_params(self):
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs')
        mock_response = '{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": [{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Invoke method
        response = service.list_catalogs()

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for create_catalog
#-----------------------------------------------------------------------------
class TestCreateCatalog():
    """Unit tests for the create_catalog operation."""

    # Preprocess the request URL to ensure the mock response will be found.
    def preprocess_url(self, request_url: str):
        # URLs ending in '/' are registered as a regex so the mock matches
        # whether or not the client appends a trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # create_catalog()
    #--------------------------------------------------------
    @responses.activate
    def test_create_catalog_all_params(self):
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs')
        mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Construct a dict representation of a Feature model
        feature_model = {}
        feature_model['title'] = 'testString'
        feature_model['description'] = 'testString'

        # Construct a dict representation of a FilterTerms model
        filter_terms_model = {}
        filter_terms_model['filter_terms'] = ['testString']

        # Construct a dict representation of a CategoryFilter model
        # (built but not referenced by the payload below)
        category_filter_model = {}
        category_filter_model['include'] = True
        category_filter_model['filter'] = filter_terms_model

        # Construct a dict representation of a IDFilter model
        id_filter_model = {}
        id_filter_model['include'] = filter_terms_model
        id_filter_model['exclude'] = filter_terms_model

        # Construct a dict representation of a Filters model
        filters_model = {}
        filters_model['include_all'] = True
        filters_model['category_filters'] = {}
        filters_model['id_filters'] = id_filter_model

        # Construct a dict representation of a SyndicationCluster model
        syndication_cluster_model = {}
        syndication_cluster_model['region'] = 'testString'
        syndication_cluster_model['id'] = 'testString'
        syndication_cluster_model['name'] = 'testString'
        syndication_cluster_model['resource_group_name'] = 'testString'
        syndication_cluster_model['type'] = 'testString'
        syndication_cluster_model['namespaces'] = ['testString']
        syndication_cluster_model['all_namespaces'] = True

        # Construct a dict representation of a SyndicationAuthorization model
        syndication_authorization_model = {}
        syndication_authorization_model['token'] = 'testString'
        syndication_authorization_model['last_run'] = '2020-01-28T18:40:40.123456Z'

        # Construct a dict representation of a SyndicationHistory model
        syndication_history_model = {}
        syndication_history_model['namespaces'] = ['testString']
        syndication_history_model['clusters'] = [syndication_cluster_model]
        syndication_history_model['last_run'] = '2020-01-28T18:40:40.123456Z'

        # Construct a dict representation of a SyndicationResource model
        syndication_resource_model = {}
        syndication_resource_model['remove_related_components'] = True
        syndication_resource_model['clusters'] = [syndication_cluster_model]
        syndication_resource_model['history'] = syndication_history_model
        syndication_resource_model['authorization'] = syndication_authorization_model

        # Set up parameter values
        id = 'testString'
        rev = 'testString'
        label = 'testString'
        short_description = 'testString'
        catalog_icon_url = 'testString'
        tags = ['testString']
        # NOTE(review): this rebinding shadows the mock endpoint `url`
        # assigned above; harmless here because the mock is already
        # registered, but a distinct name would be clearer.
        url = 'testString'
        crn = 'testString'
        offerings_url = 'testString'
        features = [feature_model]
        disabled = True
        # 1580236840.123456 s == 2020-01-28T18:40:40.123456Z (UTC)
        created = datetime.fromtimestamp(1580236840.123456, timezone.utc)
        updated = datetime.fromtimestamp(1580236840.123456, timezone.utc)
        resource_group_id = 'testString'
        owning_account = 'testString'
        catalog_filters = filters_model
        syndication_settings = syndication_resource_model

        # Invoke method
        response = service.create_catalog(
            id=id,
            rev=rev,
            label=label,
            short_description=short_description,
            catalog_icon_url=catalog_icon_url,
            tags=tags,
            url=url,
            crn=crn,
            offerings_url=offerings_url,
            features=features,
            disabled=disabled,
            created=created,
            updated=updated,
            resource_group_id=resource_group_id,
            owning_account=owning_account,
            catalog_filters=catalog_filters,
            syndication_settings=syndication_settings,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

        # Validate body params: every parameter must appear in the
        # serialized request body (datetimes as ISO-8601 strings).
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['id'] == 'testString'
        assert req_body['_rev'] == 'testString'
        assert req_body['label'] == 'testString'
        assert req_body['short_description'] == 'testString'
        assert req_body['catalog_icon_url'] == 'testString'
        assert req_body['tags'] == ['testString']
        assert req_body['url'] == 'testString'
        assert req_body['crn'] == 'testString'
        assert req_body['offerings_url'] == 'testString'
        assert req_body['features'] == [feature_model]
        assert req_body['disabled'] == True
        assert req_body['created'] == '2020-01-28T18:40:40.123456Z'
        assert req_body['updated'] == '2020-01-28T18:40:40.123456Z'
        assert req_body['resource_group_id'] == 'testString'
        assert req_body['owning_account'] == 'testString'
        assert req_body['catalog_filters'] == filters_model
        assert req_body['syndication_settings'] == syndication_resource_model

    #--------------------------------------------------------
    # test_create_catalog_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_create_catalog_required_params(self):
        # Set up mock (same payload as above; no parameters sent)
        url = self.preprocess_url(base_url + '/catalogs')
        mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Invoke method
        response = service.create_catalog()

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
#-----------------------------------------------------------------------------
# Test Class for get_catalog
#-----------------------------------------------------------------------------
class TestGetCatalog():
    """Unit tests for the get_catalog operation."""

    # Preprocess the request URL to ensure the mock response will be found.
    def preprocess_url(self, request_url: str):
        # URLs ending in '/' are registered as a regex so the mock matches
        # whether or not the client appends a trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_catalog()
    #--------------------------------------------------------
    @responses.activate
    def test_get_catalog_all_params(self):
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString')
        mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        catalog_identifier = 'testString'

        # Invoke method
        response = service.get_catalog(
            catalog_identifier,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_get_catalog_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_catalog_value_error(self):
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString')
        mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        catalog_identifier = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "catalog_identifier": catalog_identifier,
        }
        for param in req_param_dict.keys():
            # Replace one required parameter at a time with None.
            # NOTE(review): `is not` is an identity comparison; it works
            # for these interned string keys but `!=` would be the safer
            # spelling -- confirm before relying on it elsewhere.
            req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_catalog(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for replace_catalog
#-----------------------------------------------------------------------------
class TestReplaceCatalog():
    """Unit tests for replace_catalog()."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        A URL that ends in one or more '/' is converted into a compiled regex
        that tolerates trailing slashes; any other URL is returned unchanged.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_replace_catalog_all_params(self):
        """replace_catalog() with every parameter; validate the serialized request body."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString')
        mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a Feature model
        feature_model = {}
        feature_model['title'] = 'testString'
        feature_model['description'] = 'testString'

        # Construct a dict representation of a FilterTerms model
        filter_terms_model = {}
        filter_terms_model['filter_terms'] = ['testString']

        # Construct a dict representation of a CategoryFilter model
        # (built for completeness; not referenced by filters_model below)
        category_filter_model = {}
        category_filter_model['include'] = True
        category_filter_model['filter'] = filter_terms_model

        # Construct a dict representation of a IDFilter model
        id_filter_model = {}
        id_filter_model['include'] = filter_terms_model
        id_filter_model['exclude'] = filter_terms_model

        # Construct a dict representation of a Filters model
        filters_model = {}
        filters_model['include_all'] = True
        filters_model['category_filters'] = {}
        filters_model['id_filters'] = id_filter_model

        # Construct a dict representation of a SyndicationCluster model
        syndication_cluster_model = {}
        syndication_cluster_model['region'] = 'testString'
        syndication_cluster_model['id'] = 'testString'
        syndication_cluster_model['name'] = 'testString'
        syndication_cluster_model['resource_group_name'] = 'testString'
        syndication_cluster_model['type'] = 'testString'
        syndication_cluster_model['namespaces'] = ['testString']
        syndication_cluster_model['all_namespaces'] = True

        # Construct a dict representation of a SyndicationAuthorization model
        syndication_authorization_model = {}
        syndication_authorization_model['token'] = 'testString'
        syndication_authorization_model['last_run'] = '2020-01-28T18:40:40.123456Z'

        # Construct a dict representation of a SyndicationHistory model
        syndication_history_model = {}
        syndication_history_model['namespaces'] = ['testString']
        syndication_history_model['clusters'] = [syndication_cluster_model]
        syndication_history_model['last_run'] = '2020-01-28T18:40:40.123456Z'

        # Construct a dict representation of a SyndicationResource model
        syndication_resource_model = {}
        syndication_resource_model['remove_related_components'] = True
        syndication_resource_model['clusters'] = [syndication_cluster_model]
        syndication_resource_model['history'] = syndication_history_model
        syndication_resource_model['authorization'] = syndication_authorization_model

        # Set up parameter values.  Note: `url` is deliberately rebound here —
        # the mock URL above has already been registered with responses.
        catalog_identifier = 'testString'
        id = 'testString'
        rev = 'testString'
        label = 'testString'
        short_description = 'testString'
        catalog_icon_url = 'testString'
        tags = ['testString']
        url = 'testString'
        crn = 'testString'
        offerings_url = 'testString'
        features = [feature_model]
        disabled = True
        created = datetime.fromtimestamp(1580236840.123456, timezone.utc)
        updated = datetime.fromtimestamp(1580236840.123456, timezone.utc)
        resource_group_id = 'testString'
        owning_account = 'testString'
        catalog_filters = filters_model
        syndication_settings = syndication_resource_model

        # Invoke method
        response = service.replace_catalog(
            catalog_identifier,
            id=id,
            rev=rev,
            label=label,
            short_description=short_description,
            catalog_icon_url=catalog_icon_url,
            tags=tags,
            url=url,
            crn=crn,
            offerings_url=offerings_url,
            features=features,
            disabled=disabled,
            created=created,
            updated=updated,
            resource_group_id=resource_group_id,
            owning_account=owning_account,
            catalog_filters=catalog_filters,
            syndication_settings=syndication_settings,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Validate body params: the datetimes must serialize to the expected
        # UTC ISO-8601 strings, and models pass through as plain dicts.
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['id'] == 'testString'
        assert req_body['_rev'] == 'testString'
        assert req_body['label'] == 'testString'
        assert req_body['short_description'] == 'testString'
        assert req_body['catalog_icon_url'] == 'testString'
        assert req_body['tags'] == ['testString']
        assert req_body['url'] == 'testString'
        assert req_body['crn'] == 'testString'
        assert req_body['offerings_url'] == 'testString'
        assert req_body['features'] == [feature_model]
        assert req_body['disabled'] == True
        assert req_body['created'] == '2020-01-28T18:40:40.123456Z'
        assert req_body['updated'] == '2020-01-28T18:40:40.123456Z'
        assert req_body['resource_group_id'] == 'testString'
        assert req_body['owning_account'] == 'testString'
        assert req_body['catalog_filters'] == filters_model
        assert req_body['syndication_settings'] == syndication_resource_model

    @responses.activate
    def test_replace_catalog_required_params(self):
        """replace_catalog() with only the required parameter succeeds."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString')
        mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        catalog_identifier = 'testString'

        # Invoke method
        response = service.replace_catalog(
            catalog_identifier,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_replace_catalog_value_error(self):
        """replace_catalog() raises ValueError when a required parameter is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString')
        mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        catalog_identifier = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "catalog_identifier": catalog_identifier,
        }
        for param in req_param_dict.keys():
            # Compare keys with '==' rather than 'is': identity of equal
            # strings is a CPython interning detail, not guaranteed behavior.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.replace_catalog(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for delete_catalog
#-----------------------------------------------------------------------------
class TestDeleteCatalog():
    """Unit tests for delete_catalog()."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        A URL that ends in one or more '/' is converted into a compiled regex
        that tolerates trailing slashes; any other URL is returned unchanged.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_delete_catalog_all_params(self):
        """delete_catalog() issues one DELETE request and returns HTTP 200."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString')
        responses.add(responses.DELETE,
                      url,
                      status=200)

        # Set up parameter values
        catalog_identifier = 'testString'

        # Invoke method
        response = service.delete_catalog(
            catalog_identifier,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_delete_catalog_value_error(self):
        """delete_catalog() raises ValueError when a required parameter is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString')
        responses.add(responses.DELETE,
                      url,
                      status=200)

        # Set up parameter values
        catalog_identifier = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "catalog_identifier": catalog_identifier,
        }
        for param in req_param_dict.keys():
            # Compare keys with '==' rather than 'is': identity of equal
            # strings is a CPython interning detail, not guaranteed behavior.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.delete_catalog(**req_copy)
# endregion
##############################################################################
# End of Service: Catalogs
##############################################################################
##############################################################################
# Start of Service: Enterprise
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for get_enterprise
#-----------------------------------------------------------------------------
class TestGetEnterprise():
    """Unit tests for get_enterprise()."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        A URL that ends in one or more '/' is converted into a compiled regex
        that tolerates trailing slashes; any other URL is returned unchanged.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_enterprise_all_params(self):
        """get_enterprise() issues one GET request and returns HTTP 200."""
        # Set up mock
        url = self.preprocess_url(base_url + '/enterprises/testString')
        mock_response = '{"id": "id", "_rev": "rev", "account_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "account_groups": {"keys": {"id": "id", "account_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}}}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        enterprise_id = 'testString'

        # Invoke method
        response = service.get_enterprise(
            enterprise_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_get_enterprise_value_error(self):
        """get_enterprise() raises ValueError when a required parameter is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/enterprises/testString')
        mock_response = '{"id": "id", "_rev": "rev", "account_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "account_groups": {"keys": {"id": "id", "account_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}}}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        enterprise_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "enterprise_id": enterprise_id,
        }
        for param in req_param_dict.keys():
            # Compare keys with '==' rather than 'is': identity of equal
            # strings is a CPython interning detail, not guaranteed behavior.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_enterprise(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for replace_enterprise
#-----------------------------------------------------------------------------
class TestReplaceEnterprise():
    """Unit tests for replace_enterprise()."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        A URL that ends in one or more '/' is converted into a compiled regex
        that tolerates trailing slashes; any other URL is returned unchanged.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_replace_enterprise_all_params(self):
        """replace_enterprise() with every parameter; validate the serialized request body."""
        # Set up mock
        url = self.preprocess_url(base_url + '/enterprises/testString')
        responses.add(responses.PUT,
                      url,
                      status=200)

        # Construct a dict representation of a FilterTerms model
        filter_terms_model = {}
        filter_terms_model['filter_terms'] = ['testString']

        # Construct a dict representation of a CategoryFilter model
        # (built for completeness; not referenced by filters_model below)
        category_filter_model = {}
        category_filter_model['include'] = True
        category_filter_model['filter'] = filter_terms_model

        # Construct a dict representation of a IDFilter model
        id_filter_model = {}
        id_filter_model['include'] = filter_terms_model
        id_filter_model['exclude'] = filter_terms_model

        # Construct a dict representation of a Filters model
        filters_model = {}
        filters_model['include_all'] = True
        filters_model['category_filters'] = {}
        filters_model['id_filters'] = id_filter_model

        # Construct a dict representation of a AccountGroup model
        account_group_model = {}
        account_group_model['id'] = 'testString'
        account_group_model['account_filters'] = filters_model

        # Construct a dict representation of a EnterpriseAccountGroups model
        enterprise_account_groups_model = {}
        enterprise_account_groups_model['keys'] = account_group_model

        # Set up parameter values
        enterprise_id = 'testString'
        id = 'testString'
        rev = 'testString'
        account_filters = filters_model
        account_groups = enterprise_account_groups_model

        # Invoke method
        response = service.replace_enterprise(
            enterprise_id,
            id=id,
            rev=rev,
            account_filters=account_filters,
            account_groups=account_groups,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['id'] == 'testString'
        assert req_body['_rev'] == 'testString'
        assert req_body['account_filters'] == filters_model
        assert req_body['account_groups'] == enterprise_account_groups_model

    @responses.activate
    def test_replace_enterprise_required_params(self):
        """replace_enterprise() with only the required parameter succeeds."""
        # Set up mock
        url = self.preprocess_url(base_url + '/enterprises/testString')
        responses.add(responses.PUT,
                      url,
                      status=200)

        # Set up parameter values
        enterprise_id = 'testString'

        # Invoke method
        response = service.replace_enterprise(
            enterprise_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_replace_enterprise_value_error(self):
        """replace_enterprise() raises ValueError when a required parameter is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/enterprises/testString')
        responses.add(responses.PUT,
                      url,
                      status=200)

        # Set up parameter values
        enterprise_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "enterprise_id": enterprise_id,
        }
        for param in req_param_dict.keys():
            # Compare keys with '==' rather than 'is': identity of equal
            # strings is a CPython interning detail, not guaranteed behavior.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.replace_enterprise(**req_copy)
# endregion
##############################################################################
# End of Service: Enterprise
##############################################################################
##############################################################################
# Start of Service: Offerings
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for get_consumption_offerings
#-----------------------------------------------------------------------------
class TestGetConsumptionOfferings():
    """Unit tests for get_consumption_offerings()."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs with one or more trailing slashes become a compiled regex that
        tolerates trailing slashes; everything else passes through unchanged.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_consumption_offerings_all_params(self):
        """get_consumption_offerings() with every query parameter; validate the query string."""
        # Register the mocked GET endpoint.
        mock_url = self.preprocess_url(base_url + '/offerings')
        mock_response = ('{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": [{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", '
                         '"script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": '
                         'true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}]}')
        responses.add(
            responses.GET,
            mock_url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Parameter values under test.
        digest = True
        catalog = 'testString'
        select = 'all'
        include_hidden = True

        # Exercise the operation.
        response = service.get_consumption_offerings(
            digest=digest,
            catalog=catalog,
            select=select,
            include_hidden=include_hidden,
            headers={},
        )

        # Exactly one HTTP call, successful.
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Each parameter must appear in the (decoded) query string; booleans
        # are serialized as lowercase 'true'/'false'.
        raw_query = responses.calls[0].request.url.split('?', 1)[1]
        decoded_query = requests.utils.unquote(raw_query)
        assert f"digest={'true' if digest else 'false'}" in decoded_query
        assert f"catalog={catalog}" in decoded_query
        assert f"select={select}" in decoded_query
        assert f"includeHidden={'true' if include_hidden else 'false'}" in decoded_query

    @responses.activate
    def test_get_consumption_offerings_required_params(self):
        """get_consumption_offerings() with no optional parameters succeeds."""
        # Register the mocked GET endpoint.
        mock_url = self.preprocess_url(base_url + '/offerings')
        mock_response = ('{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": [{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", '
                         '"script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": '
                         'true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}]}')
        responses.add(
            responses.GET,
            mock_url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Exercise the operation with defaults only.
        response = service.get_consumption_offerings()

        # Exactly one HTTP call, successful.
        assert len(responses.calls) == 1
        assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for list_offerings
#-----------------------------------------------------------------------------
class TestListOfferings():
    """Unit tests for get_offerings / list_offerings against a mocked catalog API."""

    # The three tests below previously each embedded an identical copy of this
    # (very large) mock JSON payload; it is defined once here instead.
    _mock_response = '{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": [{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}]}'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        URLs without a trailing slash are returned unchanged; URLs with one or
        more trailing slashes are turned into a compiled regex that tolerates
        any number of trailing slashes.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    def _register_mock(self):
        """Register the shared GET mock for the list-offerings endpoint and return its URL."""
        url = self.preprocess_url(base_url + '/catalogs/testString/offerings')
        responses.add(responses.GET,
                      url,
                      body=self._mock_response,
                      content_type='application/json',
                      status=200)
        return url

    @responses.activate
    def test_list_offerings_all_params(self):
        """list_offerings() with every optional parameter supplied."""
        # Set up mock
        self._register_mock()
        # Set up parameter values
        catalog_identifier = 'testString'
        digest = True
        # Invoke method
        response = service.list_offerings(
            catalog_identifier,
            digest=digest,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'digest={}'.format('true' if digest else 'false') in query_string

    @responses.activate
    def test_list_offerings_required_params(self):
        """list_offerings() with only the required catalog identifier."""
        # Set up mock
        self._register_mock()
        # Set up parameter values
        catalog_identifier = 'testString'
        # Invoke method
        response = service.list_offerings(
            catalog_identifier,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_list_offerings_value_error(self):
        """list_offerings() raises ValueError when a required parameter is None."""
        # Set up mock
        self._register_mock()
        # Set up parameter values
        catalog_identifier = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "catalog_identifier": catalog_identifier,
        }
        for param in req_param_dict.keys():
            # FIX: the generated code compared strings with `is not` (object
            # identity), which only works by virtue of string interning; use
            # `!=` (value equality) instead.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.list_offerings(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for create_offering
#-----------------------------------------------------------------------------
class TestCreateOffering():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
    """Preprocess the request URL so the mock response will be found.

    A URL ending in one or more slashes is converted to a compiled regex
    that matches the slash-stripped URL followed by any number of trailing
    slashes; all other URLs pass through unchanged.
    """
    ends_with_slash = re.fullmatch('.*/+', request_url) is not None
    if ends_with_slash:
        return re.compile(request_url.rstrip('/') + '/+')
    return request_url
#--------------------------------------------------------
# create_offering()
#--------------------------------------------------------
@responses.activate
def test_create_offering_all_params(self):
    """create_offering() with every optional field populated.

    Registers a POST mock, invokes the operation, and verifies that the
    serialized request body matches the supplied models field-for-field.
    """
    # Set up mock (named mock_url so it cannot be confused with the `url`
    # body field sent to create_offering below)
    mock_url = self.preprocess_url(base_url + '/catalogs/testString/offerings')
    mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
    responses.add(responses.POST,
                  mock_url,
                  body=mock_response,
                  content_type='application/json',
                  status=201)

    # Model dicts, written as literals rather than key-by-key assignment.
    # Rating model
    rating_model = {
        'one_star_count': 38,
        'two_star_count': 38,
        'three_star_count': 38,
        'four_star_count': 38,
    }
    # Feature model
    feature_model = {'title': 'testString', 'description': 'testString'}
    # Configuration model
    configuration_model = {
        'key': 'testString',
        'type': 'testString',
        'default_value': {'foo': 'bar'},
        'value_constraint': 'testString',
        'description': 'testString',
        'required': True,
        'options': [{'foo': 'bar'}],
        'hidden': True,
    }
    # Deployment model
    deployment_model = {
        'id': 'testString',
        'label': 'testString',
        'name': 'testString',
        'short_description': 'testString',
        'long_description': 'testString',
        'metadata': {'foo': 'bar'},
        'tags': ['testString'],
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
    }
    # License model
    license_model = {
        'id': 'testString',
        'name': 'testString',
        'type': 'testString',
        'url': 'testString',
        'description': 'testString',
    }
    # Resource model
    resource_model = {'type': 'mem', 'value': {'foo': 'bar'}}
    # Script model
    script_model = {
        'instructions': 'testString',
        'script': 'testString',
        'script_permission': 'testString',
        'delete_script': 'testString',
        'scope': 'testString',
    }
    # State model
    state_model = {
        'current': 'testString',
        'current_entered': '2020-01-28T18:40:40.123456Z',
        'pending': 'testString',
        'pending_requested': '2020-01-28T18:40:40.123456Z',
        'previous': 'testString',
    }
    # Validation model
    validation_model = {
        'validated': '2020-01-28T18:40:40.123456Z',
        'requested': '2020-01-28T18:40:40.123456Z',
        'state': 'testString',
        'last_operation': 'testString',
        'target': {'foo': 'bar'},
    }
    # VersionEntitlement model
    version_entitlement_model = {
        'provider_name': 'testString',
        'provider_id': 'testString',
        'product_id': 'testString',
        'part_numbers': ['testString'],
        'image_repo_name': 'testString',
    }
    # Plan model
    plan_model = {
        'id': 'testString',
        'label': 'testString',
        'name': 'testString',
        'short_description': 'testString',
        'long_description': 'testString',
        'metadata': {'foo': 'bar'},
        'tags': ['testString'],
        'additional_features': [feature_model],
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
        'deployments': [deployment_model],
    }
    # Version model
    version_model = {
        'id': 'testString',
        '_rev': 'testString',
        'crn': 'testString',
        'version': 'testString',
        'sha': 'testString',
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
        'offering_id': 'testString',
        'catalog_id': 'testString',
        'kind_id': 'testString',
        'tags': ['testString'],
        'repo_url': 'testString',
        'source_url': 'testString',
        'tgz_url': 'testString',
        'configuration': [configuration_model],
        'metadata': {'foo': 'bar'},
        'validation': validation_model,
        'required_resources': [resource_model],
        'single_instance': True,
        'install': script_model,
        'pre_install': [script_model],
        'entitlement': version_entitlement_model,
        'licenses': [license_model],
        'image_manifest_url': 'testString',
        'deprecated': True,
        'package_version': 'testString',
        'state': state_model,
        'version_locator': 'testString',
        'console_url': 'testString',
        'long_description': 'testString',
        'whitelisted_accounts': ['testString'],
    }
    # Kind model
    kind_model = {
        'id': 'testString',
        'format_kind': 'testString',
        'target_kind': 'testString',
        'metadata': {'foo': 'bar'},
        'install_description': 'testString',
        'tags': ['testString'],
        'additional_features': [feature_model],
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
        'versions': [version_model],
        'plans': [plan_model],
    }
    # RepoInfo model
    repo_info_model = {'token': 'testString', 'type': 'testString'}

    # Invoke method with every keyword argument supplied
    response = service.create_offering(
        'testString',
        id='testString',
        rev='testString',
        url='testString',
        crn='testString',
        label='testString',
        name='testString',
        offering_icon_url='testString',
        offering_docs_url='testString',
        offering_support_url='testString',
        tags=['testString'],
        rating=rating_model,
        created=datetime.fromtimestamp(1580236840.123456, timezone.utc),
        updated=datetime.fromtimestamp(1580236840.123456, timezone.utc),
        short_description='testString',
        long_description='testString',
        features=[feature_model],
        kinds=[kind_model],
        permit_request_ibm_public_publish=True,
        ibm_publish_approved=True,
        public_publish_approved=True,
        public_original_crn='testString',
        publish_public_crn='testString',
        portal_approval_record='testString',
        portal_ui_url='testString',
        catalog_id='testString',
        catalog_name='testString',
        metadata={'foo': 'bar'},
        disclaimer='testString',
        hidden=True,
        provider='testString',
        repo_info=repo_info_model,
        headers={}
    )

    # Check for correct operation
    assert len(responses.calls) == 1
    assert response.status_code == 201

    # Validate body params: compare every field against one expected mapping
    # instead of thirty-one separate assert statements.
    req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
    expected_body = {
        'id': 'testString',
        '_rev': 'testString',
        'url': 'testString',
        'crn': 'testString',
        'label': 'testString',
        'name': 'testString',
        'offering_icon_url': 'testString',
        'offering_docs_url': 'testString',
        'offering_support_url': 'testString',
        'tags': ['testString'],
        'rating': rating_model,
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
        'short_description': 'testString',
        'long_description': 'testString',
        'features': [feature_model],
        'kinds': [kind_model],
        'permit_request_ibm_public_publish': True,
        'ibm_publish_approved': True,
        'public_publish_approved': True,
        'public_original_crn': 'testString',
        'publish_public_crn': 'testString',
        'portal_approval_record': 'testString',
        'portal_ui_url': 'testString',
        'catalog_id': 'testString',
        'catalog_name': 'testString',
        'metadata': {'foo': 'bar'},
        'disclaimer': 'testString',
        'hidden': True,
        'provider': 'testString',
        'repo_info': repo_info_model,
    }
    for field, expected_value in expected_body.items():
        assert req_body[field] == expected_value
#--------------------------------------------------------
# test_create_offering_required_params()
#--------------------------------------------------------
@responses.activate
def test_create_offering_required_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/catalogs/testString/offerings')
mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": 
"script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
catalog_identifier = 'testString'
# Invoke method
response = service.create_offering(
catalog_identifier,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
#--------------------------------------------------------
# test_create_offering_value_error()
#--------------------------------------------------------
@responses.activate
def test_create_offering_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/catalogs/testString/offerings')
mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": 
"script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
catalog_identifier = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"catalog_identifier": catalog_identifier,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.create_offering(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for import_offering_version
#-----------------------------------------------------------------------------
class TestImportOfferingVersion():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# import_offering_version()
#--------------------------------------------------------
@responses.activate
def test_import_offering_version_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/version')
mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": 
"script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
catalog_identifier = 'testString'
offering_id = 'testString'
zipurl = 'testString'
tags = ['testString']
target_kinds = ['testString']
target_version = 'testString'
include_config = True
repo_type = 'testString'
x_auth_token = 'testString'
# Invoke method
response = service.import_offering_version(
catalog_identifier,
offering_id,
zipurl,
tags=tags,
target_kinds=target_kinds,
target_version=target_version,
include_config=include_config,
repo_type=repo_type,
x_auth_token=x_auth_token,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = requests.utils.unquote(query_string)
assert 'zipurl={}'.format(zipurl) in query_string
assert 'targetVersion={}'.format(target_version) in query_string
assert 'includeConfig={}'.format('true' if include_config else 'false') in query_string
assert 'repoType={}'.format(repo_type) in query_string
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['tags'] == ['testString']
assert req_body['target_kinds'] == ['testString']
#--------------------------------------------------------
# test_import_offering_version_required_params()
#--------------------------------------------------------
@responses.activate
def test_import_offering_version_required_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/version')
mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": 
"script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
catalog_identifier = 'testString'
offering_id = 'testString'
zipurl = 'testString'
# Invoke method
response = service.import_offering_version(
catalog_identifier,
offering_id,
zipurl,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = requests.utils.unquote(query_string)
assert 'zipurl={}'.format(zipurl) in query_string
#--------------------------------------------------------
# test_import_offering_version_value_error()
#--------------------------------------------------------
@responses.activate
def test_import_offering_version_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/version')
mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": 
"script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
catalog_identifier = 'testString'
offering_id = 'testString'
zipurl = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"catalog_identifier": catalog_identifier,
"offering_id": offering_id,
"zipurl": zipurl,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.import_offering_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for import_offering
#-----------------------------------------------------------------------------
class TestImportOffering():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# import_offering()
#--------------------------------------------------------
@responses.activate
def test_import_offering_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/catalogs/testString/import/offerings')
mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": 
"script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
catalog_identifier = 'testString'
zipurl = 'testString'
tags = ['testString']
target_kinds = ['testString']
offering_id = 'testString'
include_config = True
repo_type = 'testString'
x_auth_token = 'testString'
# Invoke method
response = service.import_offering(
catalog_identifier,
zipurl,
tags=tags,
target_kinds=target_kinds,
offering_id=offering_id,
include_config=include_config,
repo_type=repo_type,
x_auth_token=x_auth_token,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = requests.utils.unquote(query_string)
assert 'zipurl={}'.format(zipurl) in query_string
assert 'offeringID={}'.format(offering_id) in query_string
assert 'includeConfig={}'.format('true' if include_config else 'false') in query_string
assert 'repoType={}'.format(repo_type) in query_string
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['tags'] == ['testString']
assert req_body['target_kinds'] == ['testString']
#--------------------------------------------------------
# test_import_offering_required_params()
#--------------------------------------------------------
@responses.activate
def test_import_offering_required_params(self):
    """Invoke import_offering() with only its required arguments."""
    # Register the mocked POST endpoint.
    url = self.preprocess_url(base_url + '/catalogs/testString/import/offerings')
    mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
    responses.add(responses.POST,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=201)

    # Required parameters only.
    catalog_identifier = 'testString'
    zipurl = 'testString'

    # Invoke the operation.
    response = service.import_offering(catalog_identifier, zipurl, headers={})

    # Exactly one request went out and it came back 201.
    assert len(responses.calls) == 1
    assert response.status_code == 201

    # The zipurl must travel on the query string.
    sent_query = requests.utils.unquote(responses.calls[0].request.url.split('?', 1)[1])
    assert 'zipurl={}'.format(zipurl) in sent_query
#--------------------------------------------------------
# test_import_offering_value_error()
#--------------------------------------------------------
@responses.activate
def test_import_offering_value_error(self):
    """Verify import_offering() raises ValueError when a required parameter is None."""
    # Set up mock (never actually hit: parameter validation fails first).
    url = self.preprocess_url(base_url + '/catalogs/testString/import/offerings')
    mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
    responses.add(responses.POST,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=201)

    # Set up parameter values
    catalog_identifier = 'testString'
    zipurl = 'testString'

    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "catalog_identifier": catalog_identifier,
        "zipurl": zipurl,
    }
    for param in req_param_dict.keys():
        # Compare with != (value equality), not 'is not': identity tests on
        # strings rely on CPython interning details and are a latent bug.
        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            service.import_offering(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for reload_offering
#-----------------------------------------------------------------------------
class TestReloadOffering():
    """Unit tests for the reload_offering() operation."""

    # Serialized Offering payload returned by every mocked reload endpoint.
    # Declared once at class level: the three tests below previously carried
    # three byte-identical copies of this multi-KB literal.
    MOCK_OFFERING_RESPONSE = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs with trailing slashes become a regex tolerating any number of them.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_reload_offering_all_params(self):
        """Invoke reload_offering() with every supported parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/reload')
        responses.add(responses.PUT,
                      url,
                      body=self.MOCK_OFFERING_RESPONSE,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        catalog_identifier = 'testString'
        offering_id = 'testString'
        zipurl = 'testString'
        target_version = 'testString'
        tags = ['testString']
        target_kinds = ['testString']
        repo_type = 'testString'
        x_auth_token = 'testString'

        # Invoke method
        response = service.reload_offering(
            catalog_identifier,
            offering_id,
            zipurl,
            target_version,
            tags=tags,
            target_kinds=target_kinds,
            repo_type=repo_type,
            x_auth_token=x_auth_token,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'zipurl={}'.format(zipurl) in query_string
        assert 'targetVersion={}'.format(target_version) in query_string
        assert 'repoType={}'.format(repo_type) in query_string

        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['tags'] == ['testString']
        assert req_body['target_kinds'] == ['testString']

    @responses.activate
    def test_reload_offering_required_params(self):
        """Invoke reload_offering() with only its required parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/reload')
        responses.add(responses.PUT,
                      url,
                      body=self.MOCK_OFFERING_RESPONSE,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        catalog_identifier = 'testString'
        offering_id = 'testString'
        zipurl = 'testString'
        target_version = 'testString'

        # Invoke method
        response = service.reload_offering(
            catalog_identifier,
            offering_id,
            zipurl,
            target_version,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'zipurl={}'.format(zipurl) in query_string
        assert 'targetVersion={}'.format(target_version) in query_string

    @responses.activate
    def test_reload_offering_value_error(self):
        """Verify reload_offering() raises ValueError when a required parameter is None."""
        # Set up mock (never actually hit: parameter validation fails first).
        url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/reload')
        responses.add(responses.PUT,
                      url,
                      body=self.MOCK_OFFERING_RESPONSE,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        catalog_identifier = 'testString'
        offering_id = 'testString'
        zipurl = 'testString'
        target_version = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "catalog_identifier": catalog_identifier,
            "offering_id": offering_id,
            "zipurl": zipurl,
            "target_version": target_version,
        }
        for param in req_param_dict.keys():
            # Compare with != (value equality), not 'is not': identity tests on
            # strings rely on CPython interning details and are a latent bug.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.reload_offering(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_offering
#-----------------------------------------------------------------------------
class TestGetOffering():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
    # Preprocess the request URL to ensure the mock response will be found.
    # A URL ending in one or more '/' is replaced by a compiled regex so the
    # mock library matches regardless of how many trailing slashes are sent.
    if re.fullmatch('.*/+', request_url) is not None:
        return re.compile(request_url.rstrip('/') + '/+')
    return request_url
#--------------------------------------------------------
# get_offering()
#--------------------------------------------------------
@responses.activate
def test_get_offering_all_params(self):
    """get_offering() issues exactly one GET request and returns 200."""
    # Register the mocked GET endpoint.
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString')
    mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
    responses.add(responses.GET,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=200)

    # Both path parameters.
    catalog_identifier = 'testString'
    offering_id = 'testString'

    # Invoke the operation.
    response = service.get_offering(catalog_identifier, offering_id, headers={})

    # One call was made and it returned 200.
    assert len(responses.calls) == 1
    assert response.status_code == 200
#--------------------------------------------------------
# test_get_offering_value_error()
#--------------------------------------------------------
@responses.activate
def test_get_offering_value_error(self):
    """get_offering() must raise ValueError when any required param is None."""
    # Set up mock
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString')
    mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
    responses.add(responses.GET,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=200)
    # Set up parameter values
    catalog_identifier = 'testString'
    offering_id = 'testString'
    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "catalog_identifier": catalog_identifier,
        "offering_id": offering_id,
    }
    for param in req_param_dict.keys():
        # BUG FIX: compare key names with '!=' (value equality) instead of
        # 'is not' (identity); identity of equal strings is an interning
        # accident and not guaranteed by the language.
        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            service.get_offering(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for replace_offering
#-----------------------------------------------------------------------------
class TestReplaceOffering():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
    """Preprocess the request URL to ensure the mock response will be found."""
    # A URL ending in one or more slashes is matched via a compiled pattern
    # that tolerates any number of trailing slashes; otherwise match verbatim.
    if re.fullmatch('.*/+', request_url):
        return re.compile(request_url.rstrip('/') + '/+')
    return request_url
#--------------------------------------------------------
# replace_offering()
#--------------------------------------------------------
@responses.activate
def test_replace_offering_all_params(self):
    """replace_offering() should send every writable field in the PUT body."""
    # Set up mock
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString')
    mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
    responses.add(responses.PUT,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=200)

    # Dict representations of the nested request models, written as literals.
    rating_model = {
        'one_star_count': 38,
        'two_star_count': 38,
        'three_star_count': 38,
        'four_star_count': 38,
    }
    feature_model = {'title': 'testString', 'description': 'testString'}
    configuration_model = {
        'key': 'testString',
        'type': 'testString',
        'default_value': {'foo': 'bar'},
        'value_constraint': 'testString',
        'description': 'testString',
        'required': True,
        'options': [{'foo': 'bar'}],
        'hidden': True,
    }
    deployment_model = {
        'id': 'testString',
        'label': 'testString',
        'name': 'testString',
        'short_description': 'testString',
        'long_description': 'testString',
        'metadata': {'foo': 'bar'},
        'tags': ['testString'],
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
    }
    license_model = {
        'id': 'testString',
        'name': 'testString',
        'type': 'testString',
        'url': 'testString',
        'description': 'testString',
    }
    resource_model = {'type': 'mem', 'value': {'foo': 'bar'}}
    script_model = {
        'instructions': 'testString',
        'script': 'testString',
        'script_permission': 'testString',
        'delete_script': 'testString',
        'scope': 'testString',
    }
    state_model = {
        'current': 'testString',
        'current_entered': '2020-01-28T18:40:40.123456Z',
        'pending': 'testString',
        'pending_requested': '2020-01-28T18:40:40.123456Z',
        'previous': 'testString',
    }
    validation_model = {
        'validated': '2020-01-28T18:40:40.123456Z',
        'requested': '2020-01-28T18:40:40.123456Z',
        'state': 'testString',
        'last_operation': 'testString',
        'target': {'foo': 'bar'},
    }
    version_entitlement_model = {
        'provider_name': 'testString',
        'provider_id': 'testString',
        'product_id': 'testString',
        'part_numbers': ['testString'],
        'image_repo_name': 'testString',
    }
    plan_model = {
        'id': 'testString',
        'label': 'testString',
        'name': 'testString',
        'short_description': 'testString',
        'long_description': 'testString',
        'metadata': {'foo': 'bar'},
        'tags': ['testString'],
        'additional_features': [feature_model],
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
        'deployments': [deployment_model],
    }
    version_model = {
        'id': 'testString',
        '_rev': 'testString',
        'crn': 'testString',
        'version': 'testString',
        'sha': 'testString',
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
        'offering_id': 'testString',
        'catalog_id': 'testString',
        'kind_id': 'testString',
        'tags': ['testString'],
        'repo_url': 'testString',
        'source_url': 'testString',
        'tgz_url': 'testString',
        'configuration': [configuration_model],
        'metadata': {'foo': 'bar'},
        'validation': validation_model,
        'required_resources': [resource_model],
        'single_instance': True,
        'install': script_model,
        'pre_install': [script_model],
        'entitlement': version_entitlement_model,
        'licenses': [license_model],
        'image_manifest_url': 'testString',
        'deprecated': True,
        'package_version': 'testString',
        'state': state_model,
        'version_locator': 'testString',
        'console_url': 'testString',
        'long_description': 'testString',
        'whitelisted_accounts': ['testString'],
    }
    kind_model = {
        'id': 'testString',
        'format_kind': 'testString',
        'target_kind': 'testString',
        'metadata': {'foo': 'bar'},
        'install_description': 'testString',
        'tags': ['testString'],
        'additional_features': [feature_model],
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
        'versions': [version_model],
        'plans': [plan_model],
    }
    repo_info_model = {'token': 'testString', 'type': 'testString'}

    # Timestamps passed as datetime objects; serialized as ISO-8601 in the body.
    created_ts = datetime.fromtimestamp(1580236840.123456, timezone.utc)
    updated_ts = datetime.fromtimestamp(1580236840.123456, timezone.utc)

    # Invoke method with every optional parameter supplied
    response = service.replace_offering(
        'testString',
        'testString',
        id='testString',
        rev='testString',
        url='testString',
        crn='testString',
        label='testString',
        name='testString',
        offering_icon_url='testString',
        offering_docs_url='testString',
        offering_support_url='testString',
        tags=['testString'],
        rating=rating_model,
        created=created_ts,
        updated=updated_ts,
        short_description='testString',
        long_description='testString',
        features=[feature_model],
        kinds=[kind_model],
        permit_request_ibm_public_publish=True,
        ibm_publish_approved=True,
        public_publish_approved=True,
        public_original_crn='testString',
        publish_public_crn='testString',
        portal_approval_record='testString',
        portal_ui_url='testString',
        catalog_id='testString',
        catalog_name='testString',
        metadata={'foo': 'bar'},
        disclaimer='testString',
        hidden=True,
        provider='testString',
        repo_info=repo_info_model,
        headers={}
    )

    # Check for correct operation
    assert len(responses.calls) == 1
    assert response.status_code == 200

    # Validate body params: every field must round-trip into the request body.
    req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
    expected_fields = {
        'id': 'testString',
        '_rev': 'testString',
        'url': 'testString',
        'crn': 'testString',
        'label': 'testString',
        'name': 'testString',
        'offering_icon_url': 'testString',
        'offering_docs_url': 'testString',
        'offering_support_url': 'testString',
        'tags': ['testString'],
        'rating': rating_model,
        'created': '2020-01-28T18:40:40.123456Z',
        'updated': '2020-01-28T18:40:40.123456Z',
        'short_description': 'testString',
        'long_description': 'testString',
        'features': [feature_model],
        'kinds': [kind_model],
        'permit_request_ibm_public_publish': True,
        'ibm_publish_approved': True,
        'public_publish_approved': True,
        'public_original_crn': 'testString',
        'publish_public_crn': 'testString',
        'portal_approval_record': 'testString',
        'portal_ui_url': 'testString',
        'catalog_id': 'testString',
        'catalog_name': 'testString',
        'metadata': {'foo': 'bar'},
        'disclaimer': 'testString',
        'hidden': True,
        'provider': 'testString',
        'repo_info': repo_info_model,
    }
    for field_name, field_value in expected_fields.items():
        assert req_body[field_name] == field_value
#--------------------------------------------------------
# test_replace_offering_required_params()
#--------------------------------------------------------
@responses.activate
def test_replace_offering_required_params(self):
    """replace_offering() should succeed with only the required path params."""
    # Set up mock
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString')
    mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
    responses.add(
        responses.PUT,
        url,
        body=mock_response,
        content_type='application/json',
        status=200,
    )
    # Invoke method with required params only
    response = service.replace_offering('testString', 'testString', headers={})
    # Check for correct operation
    assert len(responses.calls) == 1
    assert response.status_code == 200
#--------------------------------------------------------
# test_replace_offering_value_error()
#--------------------------------------------------------
@responses.activate
def test_replace_offering_value_error(self):
    """replace_offering() must raise ValueError when any required param is None."""
    # Set up mock
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString')
    mock_response = '{"id": "id", "_rev": "rev", "label": "label", "short_description": "short_description", "catalog_icon_url": "catalog_icon_url", "tags": ["tags"], "url": "url", "crn": "crn", "offerings_url": "offerings_url", "features": [{"title": "title", "description": "description"}], "disabled": true, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "resource_group_id": "resource_group_id", "owning_account": "owning_account", "catalog_filters": {"include_all": false, "category_filters": {"mapKey": {"include": false, "filter": {"filter_terms": ["filter_terms"]}}}, "id_filters": {"include": {"filter_terms": ["filter_terms"]}, "exclude": {"filter_terms": ["filter_terms"]}}}, "syndication_settings": {"remove_related_components": false, "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "history": {"namespaces": ["namespaces"], "clusters": [{"region": "region", "id": "id", "name": "name", "resource_group_name": "resource_group_name", "type": "type", "namespaces": ["namespaces"], "all_namespaces": true}], "last_run": "2019-01-01T12:00:00"}, "authorization": {"token": "token", "last_run": "2019-01-01T12:00:00"}}}'
    responses.add(responses.PUT,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=200)
    # Set up parameter values
    catalog_identifier = 'testString'
    offering_id = 'testString'
    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "catalog_identifier": catalog_identifier,
        "offering_id": offering_id,
    }
    for param in req_param_dict.keys():
        # BUG FIX: use '!=' (value inequality) rather than 'is not' (identity);
        # string identity only holds here because of CPython interning.
        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            service.replace_offering(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for delete_offering
#-----------------------------------------------------------------------------
class TestDeleteOffering():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
    """Preprocess the request URL to ensure the mock response will be found."""
    has_trailing_slash = re.fullmatch('.*/+', request_url) is not None
    if not has_trailing_slash:
        # No trailing slash: the mock can match the URL verbatim.
        return request_url
    # Otherwise match one-or-more trailing slashes via a compiled pattern.
    return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# delete_offering()
#--------------------------------------------------------
@responses.activate
def test_delete_offering_all_params(self):
    """delete_offering() should issue a DELETE against the offering URL."""
    # Set up mock (DELETE returns an empty 200 body)
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString')
    responses.add(responses.DELETE, url, status=200)
    # Invoke method
    response = service.delete_offering('testString', 'testString', headers={})
    # Check for correct operation
    assert len(responses.calls) == 1
    assert response.status_code == 200
#--------------------------------------------------------
# test_delete_offering_value_error()
#--------------------------------------------------------
@responses.activate
def test_delete_offering_value_error(self):
    """delete_offering() must raise ValueError when any required param is None."""
    # Set up mock
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString')
    responses.add(responses.DELETE,
                  url,
                  status=200)
    # Set up parameter values
    catalog_identifier = 'testString'
    offering_id = 'testString'
    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "catalog_identifier": catalog_identifier,
        "offering_id": offering_id,
    }
    for param in req_param_dict.keys():
        # BUG FIX: compare with '!=' (value equality) instead of 'is not'
        # (identity), which only works here due to string interning.
        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            service.delete_offering(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for replace_offering_icon
#-----------------------------------------------------------------------------
class TestReplaceOfferingIcon():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
    """Preprocess the request URL to ensure the mock response will be found."""
    trailing = re.fullmatch('.*/+', request_url)
    # With trailing slash(es), match via a pattern tolerant of extra slashes;
    # otherwise match the URL verbatim.
    return re.compile(request_url.rstrip('/') + '/+') if trailing else request_url
#--------------------------------------------------------
# replace_offering_icon()
#--------------------------------------------------------
@responses.activate
def test_replace_offering_icon_all_params(self):
    """replace_offering_icon() should PUT to the icon endpoint and return 200."""
    # Set up mock
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/icon/testString')
    mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
    responses.add(
        responses.PUT,
        url,
        body=mock_response,
        content_type='application/json',
        status=200,
    )
    # Invoke method with the three required params
    response = service.replace_offering_icon(
        'testString',
        'testString',
        'testString',
        headers={}
    )
    # Check for correct operation
    assert len(responses.calls) == 1
    assert response.status_code == 200
#--------------------------------------------------------
# test_replace_offering_icon_value_error()
#--------------------------------------------------------
@responses.activate
def test_replace_offering_icon_value_error(self):
    """replace_offering_icon() must raise ValueError when any required param is None."""
    # Set up mock
    url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/icon/testString')
    mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
    responses.add(responses.PUT,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=200)
    # Set up parameter values
    catalog_identifier = 'testString'
    offering_id = 'testString'
    file_name = 'testString'
    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "catalog_identifier": catalog_identifier,
        "offering_id": offering_id,
        "file_name": file_name,
    }
    for param in req_param_dict.keys():
        # BUG FIX: compare key names with '!=' rather than 'is not'; the
        # identity test only passes because CPython interns short strings.
        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            service.replace_offering_icon(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for update_offering_ibm
#-----------------------------------------------------------------------------
class TestUpdateOfferingIbm():
    """Unit tests for the update_offering_ibm operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_update_offering_ibm_all_params(self):
        """Invoke update_offering_ibm with every parameter and verify one 200 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/publish/ibm/true')
        mock_response = '{"ibm": false, "public": true, "changed": false}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        catalog_identifier = 'testString'
        offering_id = 'testString'
        approval_type = 'ibm'
        approved = 'true'

        # Invoke method
        response = service.update_offering_ibm(
            catalog_identifier,
            offering_id,
            approval_type,
            approved,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_update_offering_ibm_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/catalogs/testString/offerings/testString/publish/ibm/true')
        mock_response = '{"ibm": false, "public": true, "changed": false}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        catalog_identifier = 'testString'
        offering_id = 'testString'
        approval_type = 'ibm'
        approved = 'true'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "catalog_identifier": catalog_identifier,
            "offering_id": offering_id,
            "approval_type": approval_type,
            "approved": approved,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is'
            # (identity), which only worked by CPython interning accident.
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.update_offering_ibm(**req_copy)
# endregion
##############################################################################
# End of Service: Offerings
##############################################################################
##############################################################################
# Start of Service: Versions
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for get_version_about
#-----------------------------------------------------------------------------
class TestGetVersionAbout():
    """Unit tests for the get_version_about operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_version_about_all_params(self):
        """Invoke get_version_about and verify one 200 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/about')
        mock_response = '"operation_response"'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='text/markdown',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.get_version_about(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_get_version_about_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/about')
        mock_response = '"operation_response"'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='text/markdown',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_version_about(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_version_license
#-----------------------------------------------------------------------------
class TestGetVersionLicense():
    """Unit tests for the get_version_license operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_version_license_all_params(self):
        """Invoke get_version_license and verify one 200 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/licenses/testString')
        responses.add(responses.GET,
                      url,
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'
        license_id = 'testString'

        # Invoke method
        response = service.get_version_license(
            version_loc_id,
            license_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_get_version_license_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/licenses/testString')
        responses.add(responses.GET,
                      url,
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'
        license_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
            "license_id": license_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_version_license(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_version_container_images
#-----------------------------------------------------------------------------
class TestGetVersionContainerImages():
    """Unit tests for the get_version_container_images operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_version_container_images_all_params(self):
        """Invoke get_version_container_images and verify one 200 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/containerImages')
        mock_response = '{"description": "description", "images": [{"image": "image"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.get_version_container_images(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_get_version_container_images_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/containerImages')
        mock_response = '{"description": "description", "images": [{"image": "image"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_version_container_images(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for deprecate_version
#-----------------------------------------------------------------------------
class TestDeprecateVersion():
    """Unit tests for the deprecate_version operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_deprecate_version_all_params(self):
        """Invoke deprecate_version and verify one 202 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/deprecate')
        responses.add(responses.POST,
                      url,
                      status=202)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.deprecate_version(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    @responses.activate
    def test_deprecate_version_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/deprecate')
        responses.add(responses.POST,
                      url,
                      status=202)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.deprecate_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for account_publish_version
#-----------------------------------------------------------------------------
class TestAccountPublishVersion():
    """Unit tests for the account_publish_version operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_account_publish_version_all_params(self):
        """Invoke account_publish_version and verify one 202 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/account-publish')
        responses.add(responses.POST,
                      url,
                      status=202)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.account_publish_version(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    @responses.activate
    def test_account_publish_version_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/account-publish')
        responses.add(responses.POST,
                      url,
                      status=202)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.account_publish_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for ibm_publish_version
#-----------------------------------------------------------------------------
class TestIbmPublishVersion():
    """Unit tests for the ibm_publish_version operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_ibm_publish_version_all_params(self):
        """Invoke ibm_publish_version and verify one 202 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/ibm-publish')
        responses.add(responses.POST,
                      url,
                      status=202)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.ibm_publish_version(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    @responses.activate
    def test_ibm_publish_version_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/ibm-publish')
        responses.add(responses.POST,
                      url,
                      status=202)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.ibm_publish_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for public_publish_version
#-----------------------------------------------------------------------------
class TestPublicPublishVersion():
    """Unit tests for the public_publish_version operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_public_publish_version_all_params(self):
        """Invoke public_publish_version and verify one 202 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/public-publish')
        responses.add(responses.POST,
                      url,
                      status=202)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.public_publish_version(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    @responses.activate
    def test_public_publish_version_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/public-publish')
        responses.add(responses.POST,
                      url,
                      status=202)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.public_publish_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for commit_version
#-----------------------------------------------------------------------------
class TestCommitVersion():
    """Unit tests for the commit_version operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_commit_version_all_params(self):
        """Invoke commit_version and verify one 200 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/commit')
        responses.add(responses.POST,
                      url,
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.commit_version(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_commit_version_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/commit')
        responses.add(responses.POST,
                      url,
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.commit_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_version_working_copy
#-----------------------------------------------------------------------------
class TestGetVersionWorkingCopy():
    """Unit tests for the get_version_working_copy operation."""

    # Mock response shared by both tests (previously duplicated verbatim).
    _MOCK_RESPONSE = '{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_version_working_copy_all_params(self):
        """Invoke get_version_working_copy and verify one 200 call."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/workingcopy')
        responses.add(responses.POST,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.get_version_working_copy(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_get_version_working_copy_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/workingcopy')
        responses.add(responses.POST,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_version_working_copy(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_version_updates
#-----------------------------------------------------------------------------
class TestGetVersionUpdates():
    """Unit tests for the get_version_updates operation."""

    # Mock response shared by all three tests (previously triplicated verbatim).
    _MOCK_RESPONSE = '[{"version_locator": "version_locator", "version": "version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "package_version": "package_version", "can_update": true, "messages": {"anyKey": "anyValue"}}]'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        Returns the URL unchanged unless it ends in one or more slashes,
        in which case a regex that tolerates trailing slashes is returned.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_version_updates_all_params(self):
        """Invoke get_version_updates with all optional query params and check them."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/updates')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        resource_group_id = 'testString'
        namespace = 'testString'

        # Invoke method
        response = service.get_version_updates(
            version_loc_id,
            cluster_id=cluster_id,
            region=region,
            resource_group_id=resource_group_id,
            namespace=namespace,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'cluster_id={}'.format(cluster_id) in query_string
        assert 'region={}'.format(region) in query_string
        assert 'resource_group_id={}'.format(resource_group_id) in query_string
        assert 'namespace={}'.format(namespace) in query_string

    @responses.activate
    def test_get_version_updates_required_params(self):
        """Invoke get_version_updates with only the required parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/updates')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Invoke method
        response = service.get_version_updates(
            version_loc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    @responses.activate
    def test_get_version_updates_value_error(self):
        """Omit each required parameter in turn and expect a ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/updates')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        version_loc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Compare string keys with '==' (value equality), not 'is' (identity).
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_version_updates(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_version
#-----------------------------------------------------------------------------
class TestGetVersion():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# get_version()
#--------------------------------------------------------
@responses.activate
def test_get_version_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/versions/testString')
mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": 
"script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
version_loc_id = 'testString'
# Invoke method
response = service.get_version(
version_loc_id,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_get_version_value_error()
#--------------------------------------------------------
@responses.activate
def test_get_version_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/versions/testString')
mock_response = '{"id": "id", "_rev": "rev", "url": "url", "crn": "crn", "label": "label", "name": "name", "offering_icon_url": "offering_icon_url", "offering_docs_url": "offering_docs_url", "offering_support_url": "offering_support_url", "tags": ["tags"], "rating": {"one_star_count": 14, "two_star_count": 14, "three_star_count": 16, "four_star_count": 15}, "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "short_description": "short_description", "long_description": "long_description", "features": [{"title": "title", "description": "description"}], "kinds": [{"id": "id", "format_kind": "format_kind", "target_kind": "target_kind", "metadata": {"anyKey": "anyValue"}, "install_description": "install_description", "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "versions": [{"id": "id", "_rev": "rev", "crn": "crn", "version": "version", "sha": "sha", "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "offering_id": "offering_id", "catalog_id": "catalog_id", "kind_id": "kind_id", "tags": ["tags"], "repo_url": "repo_url", "source_url": "source_url", "tgz_url": "tgz_url", "configuration": [{"key": "key", "type": "type", "default_value": {"anyKey": "anyValue"}, "value_constraint": "value_constraint", "description": "description", "required": true, "options": [{"anyKey": "anyValue"}], "hidden": true}], "metadata": {"anyKey": "anyValue"}, "validation": {"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}, "required_resources": [{"type": "mem", "value": {"anyKey": "anyValue"}}], "single_instance": false, "install": {"instructions": "instructions", "script": "script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}, "pre_install": [{"instructions": "instructions", "script": 
"script", "script_permission": "script_permission", "delete_script": "delete_script", "scope": "scope"}], "entitlement": {"provider_name": "provider_name", "provider_id": "provider_id", "product_id": "product_id", "part_numbers": ["part_numbers"], "image_repo_name": "image_repo_name"}, "licenses": [{"id": "id", "name": "name", "type": "type", "url": "url", "description": "description"}], "image_manifest_url": "image_manifest_url", "deprecated": true, "package_version": "package_version", "state": {"current": "current", "current_entered": "2019-01-01T12:00:00", "pending": "pending", "pending_requested": "2019-01-01T12:00:00", "previous": "previous"}, "version_locator": "version_locator", "console_url": "console_url", "long_description": "long_description", "whitelisted_accounts": ["whitelisted_accounts"]}], "plans": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "additional_features": [{"title": "title", "description": "description"}], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00", "deployments": [{"id": "id", "label": "label", "name": "name", "short_description": "short_description", "long_description": "long_description", "metadata": {"anyKey": "anyValue"}, "tags": ["tags"], "created": "2019-01-01T12:00:00", "updated": "2019-01-01T12:00:00"}]}]}], "permit_request_ibm_public_publish": false, "ibm_publish_approved": true, "public_publish_approved": false, "public_original_crn": "public_original_crn", "publish_public_crn": "publish_public_crn", "portal_approval_record": "portal_approval_record", "portal_ui_url": "portal_ui_url", "catalog_id": "catalog_id", "catalog_name": "catalog_name", "metadata": {"anyKey": "anyValue"}, "disclaimer": "disclaimer", "hidden": true, "provider": "provider", "repo_info": {"token": "token", "type": "type"}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
version_loc_id = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"version_loc_id": version_loc_id,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.get_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for delete_version
#-----------------------------------------------------------------------------
class TestDeleteVersion():
    """Unit tests for the delete_version() operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs without a trailing slash pass through unchanged; URLs ending in a
        run of slashes become a regex tolerant of trailing slashes.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # delete_version()
    #--------------------------------------------------------
    @responses.activate
    def test_delete_version_all_params(self):
        """Happy path: the mocked DELETE endpoint is hit once and returns 200."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString')
        responses.add(responses.DELETE,
                      url,
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        # Invoke method
        response = service.delete_version(
            version_loc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_delete_version_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_delete_version_value_error(self):
        """Omitting any single required parameter must raise ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString')
        responses.add(responses.DELETE,
                      url,
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict:
            # Fixed: use value inequality (!=) instead of identity (`is not`).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.delete_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for list_versions
#-----------------------------------------------------------------------------
class TestListVersions():
    """Unit tests for the list_versions() operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs without a trailing slash pass through unchanged; URLs ending in a
        run of slashes become a regex tolerant of trailing slashes.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # list_versions()
    #--------------------------------------------------------
    @responses.activate
    def test_list_versions_all_params(self):
        """Happy path: endpoint hit once, 200, and the 'q' query param is sent."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions')
        responses.add(responses.GET,
                      url,
                      status=200)
        # Set up parameter values
        q = 'testString'
        # Invoke method
        response = service.list_versions(
            q,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'q={}'.format(q) in query_string

    #--------------------------------------------------------
    # test_list_versions_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_list_versions_value_error(self):
        """Omitting any single required parameter must raise ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions')
        responses.add(responses.GET,
                      url,
                      status=200)
        # Set up parameter values
        q = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "q": q,
        }
        for param in req_param_dict:
            # Fixed: use value inequality (!=) instead of identity (`is not`).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.list_versions(**req_copy)
# endregion
##############################################################################
# End of Service: Versions
##############################################################################
##############################################################################
# Start of Service: Repo
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for get_repos
#-----------------------------------------------------------------------------
class TestGetRepos():
    """Unit tests for the get_repos() operation."""

    # Shared mock payload for GET /repo/{type}/entries; defined once for both tests.
    _MOCK_RESPONSE = '{"chart": {"api_version": "api_version", "created": "2019-01-01T12:00:00", "description": "description", "deprecated": true, "digest": "digest", "home": "home", "icon": "icon", "keywords": ["keywords"], "maintainers": [{"email": "email", "name": "name"}], "name": "name", "tiller_version": "tiller_version", "urls": ["urls"], "sources": ["sources"], "version": "version", "appVersion": "app_version"}}'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs without a trailing slash pass through unchanged; URLs ending in a
        run of slashes become a regex tolerant of trailing slashes.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_repos()
    #--------------------------------------------------------
    @responses.activate
    def test_get_repos_all_params(self):
        """Happy path: endpoint hit once, 200, 'repourl' query param sent."""
        # Set up mock
        url = self.preprocess_url(base_url + '/repo/testString/entries')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values ('type_' avoids shadowing the builtin).
        type_ = 'testString'
        repourl = 'testString'
        # Invoke method
        response = service.get_repos(
            type_,
            repourl,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'repourl={}'.format(repourl) in query_string

    #--------------------------------------------------------
    # test_get_repos_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_repos_value_error(self):
        """Omitting any single required parameter must raise ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/repo/testString/entries')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        type_ = 'testString'
        repourl = 'testString'
        # Pass in all but one required param and check for a ValueError.
        # Dict keys must match the service method's keyword names.
        req_param_dict = {
            "type": type_,
            "repourl": repourl,
        }
        for param in req_param_dict:
            # Fixed: use value inequality (!=) instead of identity (`is not`).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_repos(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_repo
#-----------------------------------------------------------------------------
class TestGetRepo():
    """Unit tests for the get_repo() operation."""

    # Shared mock payload for GET /repo/{type}; defined once for both tests.
    _MOCK_RESPONSE = '{"chart": {"Chart.yaml": {"name": "name", "description": "description", "icon": "icon", "version": "version", "appVersion": "app_version"}, "sha": {"anyKey": "anyValue"}, "README.md": "readme_md", "values-metadata": {"anyKey": "anyValue"}, "license-metadata": {"anyKey": "anyValue"}}}'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs without a trailing slash pass through unchanged; URLs ending in a
        run of slashes become a regex tolerant of trailing slashes.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_repo()
    #--------------------------------------------------------
    @responses.activate
    def test_get_repo_all_params(self):
        """Happy path: endpoint hit once, 200, 'charturl' query param sent."""
        # Set up mock
        url = self.preprocess_url(base_url + '/repo/testString')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values ('type_' avoids shadowing the builtin).
        type_ = 'testString'
        charturl = 'testString'
        # Invoke method
        response = service.get_repo(
            type_,
            charturl,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'charturl={}'.format(charturl) in query_string

    #--------------------------------------------------------
    # test_get_repo_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_repo_value_error(self):
        """Omitting any single required parameter must raise ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/repo/testString')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        type_ = 'testString'
        charturl = 'testString'
        # Pass in all but one required param and check for a ValueError.
        # Dict keys must match the service method's keyword names.
        req_param_dict = {
            "type": type_,
            "charturl": charturl,
        }
        for param in req_param_dict:
            # Fixed: use value inequality (!=) instead of identity (`is not`).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_repo(**req_copy)
# endregion
##############################################################################
# End of Service: Repo
##############################################################################
##############################################################################
# Start of Service: Deploy
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_clusters
#-----------------------------------------------------------------------------
class TestListClusters():
    """Unit tests for the list_clusters() operation."""

    # Mock payload for GET /deploy/kubernetes/clusters, shared by both tests.
    _MOCK_RESPONSE = '{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": [{"resource_group_id": "resource_group_id", "resource_group_name": "resource_group_name", "id": "id", "name": "name", "region": "region"}]}'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        Trailing-slash URLs are turned into a slash-tolerant regex; anything
        else is returned as-is.
        """
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    #--------------------------------------------------------
    # list_clusters()
    #--------------------------------------------------------
    @responses.activate
    def test_list_clusters_all_params(self):
        """All optional params supplied: one call, 200, query string populated."""
        # Register the mocked endpoint.
        endpoint = self.preprocess_url(base_url + '/deploy/kubernetes/clusters')
        responses.add(responses.GET,
                      endpoint,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Parameter values ('type_' avoids shadowing the builtin).
        limit = 38
        offset = 38
        type_ = 'testString'
        # Exercise the operation.
        response = service.list_clusters(
            limit=limit,
            offset=offset,
            type=type_,
            headers={}
        )
        # Exactly one HTTP call, successful.
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Every optional parameter must appear in the query string.
        qs = requests.utils.unquote(responses.calls[0].request.url.split('?', 1)[1])
        assert 'limit={}'.format(limit) in qs
        assert 'offset={}'.format(offset) in qs
        assert 'type={}'.format(type_) in qs

    #--------------------------------------------------------
    # test_list_clusters_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_list_clusters_required_params(self):
        """No parameters at all: the call still succeeds with a 200."""
        # Register the mocked endpoint.
        endpoint = self.preprocess_url(base_url + '/deploy/kubernetes/clusters')
        responses.add(responses.GET,
                      endpoint,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Exercise the operation with no arguments.
        response = service.list_clusters()
        # Exactly one HTTP call, successful.
        assert len(responses.calls) == 1
        assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for get_cluster
#-----------------------------------------------------------------------------
class TestGetCluster():
    """Unit tests for the get_cluster() operation."""

    # Shared mock payload for GET /deploy/kubernetes/clusters/{cluster_id}.
    _MOCK_RESPONSE = '{"resource_group_id": "resource_group_id", "resource_group_name": "resource_group_name", "id": "id", "name": "name", "region": "region"}'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs without a trailing slash pass through unchanged; URLs ending in a
        run of slashes become a regex tolerant of trailing slashes.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_cluster()
    #--------------------------------------------------------
    @responses.activate
    def test_get_cluster_all_params(self):
        """Happy path: endpoint hit once, 200, 'region' query param sent."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/clusters/testString')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        cluster_id = 'testString'
        region = 'testString'
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.get_cluster(
            cluster_id,
            region,
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'region={}'.format(region) in query_string

    #--------------------------------------------------------
    # test_get_cluster_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_cluster_value_error(self):
        """Omitting any single required parameter must raise ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/clusters/testString')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        cluster_id = 'testString'
        region = 'testString'
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "cluster_id": cluster_id,
            "region": region,
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict:
            # Fixed: use value inequality (!=) instead of identity (`is not`).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_cluster(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_namespaces
#-----------------------------------------------------------------------------
class TestGetNamespaces():
    """Unit tests for the get_namespaces() operation."""

    # Shared mock payload for GET .../clusters/{id}/namespaces; used by all
    # three tests so the literal is defined exactly once.
    _MOCK_RESPONSE = '{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": ["resources"]}'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs without a trailing slash pass through unchanged; URLs ending in a
        run of slashes become a regex tolerant of trailing slashes.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_namespaces()
    #--------------------------------------------------------
    @responses.activate
    def test_get_namespaces_all_params(self):
        """All params (incl. paging): one call, 200, query string populated."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/clusters/testString/namespaces')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        cluster_id = 'testString'
        region = 'testString'
        x_auth_refresh_token = 'testString'
        limit = 38
        offset = 38
        # Invoke method
        response = service.get_namespaces(
            cluster_id,
            region,
            x_auth_refresh_token,
            limit=limit,
            offset=offset,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'region={}'.format(region) in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'offset={}'.format(offset) in query_string

    #--------------------------------------------------------
    # test_get_namespaces_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_get_namespaces_required_params(self):
        """Required params only: one call, 200, 'region' query param sent."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/clusters/testString/namespaces')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        cluster_id = 'testString'
        region = 'testString'
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.get_namespaces(
            cluster_id,
            region,
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'region={}'.format(region) in query_string

    #--------------------------------------------------------
    # test_get_namespaces_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_namespaces_value_error(self):
        """Omitting any single required parameter must raise ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/clusters/testString/namespaces')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        cluster_id = 'testString'
        region = 'testString'
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "cluster_id": cluster_id,
            "region": region,
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict:
            # Fixed: use value inequality (!=) instead of identity (`is not`).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_namespaces(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for create_operator
#-----------------------------------------------------------------------------
class TestCreateOperator():
    """Unit tests for the create_operator() operation."""

    # Shared mock payload for POST /deploy/kubernetes/olm/operator; used by
    # all three tests so the literal is defined exactly once.
    _MOCK_RESPONSE = '[{"phase": "phase", "message": "message", "link": "link", "name": "name", "version": "version", "namespace": "namespace", "package_name": "package_name", "catalog_id": "catalog_id"}]'

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found.

        URLs without a trailing slash pass through unchanged; URLs ending in a
        run of slashes become a regex tolerant of trailing slashes.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # create_operator()
    #--------------------------------------------------------
    @responses.activate
    def test_create_operator_all_params(self):
        """All params: one call, 200, and the JSON body echoes every field."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
        responses.add(responses.POST,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        x_auth_refresh_token = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        namespaces = ['testString']
        version_locator_id = 'testString'
        # Invoke method
        response = service.create_operator(
            x_auth_refresh_token,
            cluster_id=cluster_id,
            region=region,
            namespaces=namespaces,
            version_locator_id=version_locator_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['cluster_id'] == 'testString'
        assert req_body['region'] == 'testString'
        assert req_body['namespaces'] == ['testString']
        assert req_body['version_locator_id'] == 'testString'

    #--------------------------------------------------------
    # test_create_operator_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_create_operator_required_params(self):
        """Only the refresh token: the call still succeeds with a 200."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
        responses.add(responses.POST,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.create_operator(
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_create_operator_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_create_operator_value_error(self):
        """Omitting any single required parameter must raise ValueError."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
        responses.add(responses.POST,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict:
            # Fixed: use value inequality (!=) instead of identity (`is not`).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.create_operator(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for list_operators
#-----------------------------------------------------------------------------
class TestListOperators():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
    """Return the URL unchanged, or a trailing-slash-tolerant regex.

    URLs ending in one or more '/' characters are converted to a compiled
    pattern matching the same URL with any number of trailing slashes.
    """
    if re.fullmatch('.*/+', request_url) is not None:
        return re.compile(request_url.rstrip('/') + '/+')
    return request_url
#--------------------------------------------------------
# list_operators()
#--------------------------------------------------------
@responses.activate
def test_list_operators_all_params(self):
    """All params: one mocked GET, HTTP 200, and every query param present."""
    # Register the mocked OLM operator endpoint.
    endpoint = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
    payload = '[{"phase": "phase", "message": "message", "link": "link", "name": "name", "version": "version", "namespace": "namespace", "package_name": "package_name", "catalog_id": "catalog_id"}]'
    responses.add(responses.GET,
                  endpoint,
                  body=payload,
                  content_type='application/json',
                  status=200)
    # Parameter values.
    x_auth_refresh_token = 'testString'
    cluster_id = 'testString'
    region = 'testString'
    version_locator_id = 'testString'
    # Exercise the operation.
    response = service.list_operators(
        x_auth_refresh_token,
        cluster_id,
        region,
        version_locator_id,
        headers={}
    )
    # Exactly one HTTP call, successful.
    assert len(responses.calls) == 1
    assert response.status_code == 200
    # All identifying parameters must be forwarded as query params.
    qs = requests.utils.unquote(responses.calls[0].request.url.split('?', 1)[1])
    assert 'cluster_id={}'.format(cluster_id) in qs
    assert 'region={}'.format(region) in qs
    assert 'version_locator_id={}'.format(version_locator_id) in qs
#--------------------------------------------------------
# test_list_operators_value_error()
#--------------------------------------------------------
@responses.activate
def test_list_operators_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
mock_response = '[{"phase": "phase", "message": "message", "link": "link", "name": "name", "version": "version", "namespace": "namespace", "package_name": "package_name", "catalog_id": "catalog_id"}]'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
x_auth_refresh_token = 'testString'
cluster_id = 'testString'
region = 'testString'
version_locator_id = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"x_auth_refresh_token": x_auth_refresh_token,
"cluster_id": cluster_id,
"region": region,
"version_locator_id": version_locator_id,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.list_operators(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for replace_operator
#-----------------------------------------------------------------------------
class TestReplaceOperator():
    """Tests for replace_operator (PUT /deploy/kubernetes/olm/operator)."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # replace_operator()
    #--------------------------------------------------------
    @responses.activate
    def test_replace_operator_all_params(self):
        """Invoke replace_operator with every parameter and validate the JSON body."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
        mock_response = '[{"phase": "phase", "message": "message", "link": "link", "name": "name", "version": "version", "namespace": "namespace", "package_name": "package_name", "catalog_id": "catalog_id"}]'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        x_auth_refresh_token = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        namespaces = ['testString']
        version_locator_id = 'testString'
        # Invoke method
        response = service.replace_operator(
            x_auth_refresh_token,
            cluster_id=cluster_id,
            region=region,
            namespaces=namespaces,
            version_locator_id=version_locator_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['cluster_id'] == 'testString'
        assert req_body['region'] == 'testString'
        assert req_body['namespaces'] == ['testString']
        assert req_body['version_locator_id'] == 'testString'

    #--------------------------------------------------------
    # test_replace_operator_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_replace_operator_required_params(self):
        """Invoke replace_operator with only the required parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
        mock_response = '[{"phase": "phase", "message": "message", "link": "link", "name": "name", "version": "version", "namespace": "namespace", "package_name": "package_name", "catalog_id": "catalog_id"}]'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.replace_operator(
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_replace_operator_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_replace_operator_value_error(self):
        """Verify replace_operator raises ValueError when a required param is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
        mock_response = '[{"phase": "phase", "message": "message", "link": "link", "name": "name", "version": "version", "namespace": "namespace", "package_name": "package_name", "catalog_id": "catalog_id"}]'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict.keys():
            # Equality comparison (!=) instead of identity (is not) on strings.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.replace_operator(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for delete_operator
#-----------------------------------------------------------------------------
class TestDeleteOperator():
    """Tests for delete_operator (DELETE /deploy/kubernetes/olm/operator)."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # delete_operator()
    #--------------------------------------------------------
    @responses.activate
    def test_delete_operator_all_params(self):
        """Invoke delete_operator with every parameter and validate the query string."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
        responses.add(responses.DELETE,
                      url,
                      status=200)
        # Set up parameter values
        x_auth_refresh_token = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        version_locator_id = 'testString'
        # Invoke method
        response = service.delete_operator(
            x_auth_refresh_token,
            cluster_id,
            region,
            version_locator_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'cluster_id={}'.format(cluster_id) in query_string
        assert 'region={}'.format(region) in query_string
        assert 'version_locator_id={}'.format(version_locator_id) in query_string

    #--------------------------------------------------------
    # test_delete_operator_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_delete_operator_value_error(self):
        """Verify delete_operator raises ValueError when any required param is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/deploy/kubernetes/olm/operator')
        responses.add(responses.DELETE,
                      url,
                      status=200)
        # Set up parameter values
        x_auth_refresh_token = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        version_locator_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "x_auth_refresh_token": x_auth_refresh_token,
            "cluster_id": cluster_id,
            "region": region,
            "version_locator_id": version_locator_id,
        }
        for param in req_param_dict.keys():
            # Equality comparison (!=) instead of identity (is not) on strings.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.delete_operator(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for install_version
#-----------------------------------------------------------------------------
class TestInstallVersion():
    """Tests for install_version (POST /versions/{id}/install)."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # install_version()
    #--------------------------------------------------------
    @responses.activate
    def test_install_version_all_params(self):
        """Invoke install_version with every parameter and validate the JSON body."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/install')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Construct a dict representation of a DeployRequestBodySchematics model
        deploy_request_body_schematics_model = {}
        deploy_request_body_schematics_model['name'] = 'testString'
        deploy_request_body_schematics_model['description'] = 'testString'
        deploy_request_body_schematics_model['tags'] = ['testString']
        deploy_request_body_schematics_model['resource_group_id'] = 'testString'
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        namespace = 'testString'
        override_values = {'foo': 'bar'}
        entitlement_apikey = 'testString'
        schematics = deploy_request_body_schematics_model
        script = 'testString'
        script_id = 'testString'
        version_locator_id = 'testString'
        vcenter_id = 'testString'
        vcenter_password = 'testString'
        vcenter_location = 'testString'
        # Invoke method
        response = service.install_version(
            version_loc_id,
            x_auth_refresh_token,
            cluster_id=cluster_id,
            region=region,
            namespace=namespace,
            override_values=override_values,
            entitlement_apikey=entitlement_apikey,
            schematics=schematics,
            script=script,
            script_id=script_id,
            version_locator_id=version_locator_id,
            vcenter_id=vcenter_id,
            vcenter_password=vcenter_password,
            vcenter_location=vcenter_location,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['cluster_id'] == 'testString'
        assert req_body['region'] == 'testString'
        assert req_body['namespace'] == 'testString'
        assert req_body['override_values'] == {'foo': 'bar'}
        assert req_body['entitlement_apikey'] == 'testString'
        assert req_body['schematics'] == deploy_request_body_schematics_model
        assert req_body['script'] == 'testString'
        assert req_body['script_id'] == 'testString'
        assert req_body['version_locator_id'] == 'testString'
        assert req_body['vcenter_id'] == 'testString'
        assert req_body['vcenter_password'] == 'testString'
        assert req_body['vcenter_location'] == 'testString'

    #--------------------------------------------------------
    # test_install_version_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_install_version_required_params(self):
        """Invoke install_version with only the required parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/install')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.install_version(
            version_loc_id,
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    #--------------------------------------------------------
    # test_install_version_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_install_version_value_error(self):
        """Verify install_version raises ValueError when any required param is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/install')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict.keys():
            # Equality comparison (!=) instead of identity (is not) on strings.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.install_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for preinstall_version
#-----------------------------------------------------------------------------
class TestPreinstallVersion():
    """Tests for preinstall_version (POST /versions/{id}/preinstall)."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # preinstall_version()
    #--------------------------------------------------------
    @responses.activate
    def test_preinstall_version_all_params(self):
        """Invoke preinstall_version with every parameter and validate the JSON body."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/preinstall')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Construct a dict representation of a DeployRequestBodySchematics model
        deploy_request_body_schematics_model = {}
        deploy_request_body_schematics_model['name'] = 'testString'
        deploy_request_body_schematics_model['description'] = 'testString'
        deploy_request_body_schematics_model['tags'] = ['testString']
        deploy_request_body_schematics_model['resource_group_id'] = 'testString'
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        namespace = 'testString'
        override_values = {'foo': 'bar'}
        entitlement_apikey = 'testString'
        schematics = deploy_request_body_schematics_model
        script = 'testString'
        script_id = 'testString'
        version_locator_id = 'testString'
        vcenter_id = 'testString'
        vcenter_password = 'testString'
        vcenter_location = 'testString'
        # Invoke method
        response = service.preinstall_version(
            version_loc_id,
            x_auth_refresh_token,
            cluster_id=cluster_id,
            region=region,
            namespace=namespace,
            override_values=override_values,
            entitlement_apikey=entitlement_apikey,
            schematics=schematics,
            script=script,
            script_id=script_id,
            version_locator_id=version_locator_id,
            vcenter_id=vcenter_id,
            vcenter_password=vcenter_password,
            vcenter_location=vcenter_location,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['cluster_id'] == 'testString'
        assert req_body['region'] == 'testString'
        assert req_body['namespace'] == 'testString'
        assert req_body['override_values'] == {'foo': 'bar'}
        assert req_body['entitlement_apikey'] == 'testString'
        assert req_body['schematics'] == deploy_request_body_schematics_model
        assert req_body['script'] == 'testString'
        assert req_body['script_id'] == 'testString'
        assert req_body['version_locator_id'] == 'testString'
        assert req_body['vcenter_id'] == 'testString'
        assert req_body['vcenter_password'] == 'testString'
        assert req_body['vcenter_location'] == 'testString'

    #--------------------------------------------------------
    # test_preinstall_version_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_preinstall_version_required_params(self):
        """Invoke preinstall_version with only the required parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/preinstall')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.preinstall_version(
            version_loc_id,
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    #--------------------------------------------------------
    # test_preinstall_version_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_preinstall_version_value_error(self):
        """Verify preinstall_version raises ValueError when any required param is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/preinstall')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict.keys():
            # Equality comparison (!=) instead of identity (is not) on strings.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.preinstall_version(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_preinstall
#-----------------------------------------------------------------------------
class TestGetPreinstall():
    """Tests for get_preinstall (GET /versions/{id}/preinstall)."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_preinstall()
    #--------------------------------------------------------
    @responses.activate
    def test_get_preinstall_all_params(self):
        """Invoke get_preinstall with every parameter and validate the query string."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/preinstall')
        mock_response = '{"metadata": {"cluster_id": "cluster_id", "region": "region", "namespace": "namespace", "workspace_id": "workspace_id", "workspace_name": "workspace_name"}, "release": {"deployments": [{"anyKey": "anyValue"}], "replicasets": [{"anyKey": "anyValue"}], "statefulsets": [{"anyKey": "anyValue"}], "pods": [{"anyKey": "anyValue"}], "errors": [{"anyKey": "anyValue"}]}, "content_mgmt": {"pods": [{"anyKey": "anyValue"}], "errors": [{"anyKey": "anyValue"}]}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        namespace = 'testString'
        # Invoke method
        response = service.get_preinstall(
            version_loc_id,
            x_auth_refresh_token,
            cluster_id=cluster_id,
            region=region,
            namespace=namespace,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'cluster_id={}'.format(cluster_id) in query_string
        assert 'region={}'.format(region) in query_string
        assert 'namespace={}'.format(namespace) in query_string

    #--------------------------------------------------------
    # test_get_preinstall_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_get_preinstall_required_params(self):
        """Invoke get_preinstall with only the required parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/preinstall')
        mock_response = '{"metadata": {"cluster_id": "cluster_id", "region": "region", "namespace": "namespace", "workspace_id": "workspace_id", "workspace_name": "workspace_name"}, "release": {"deployments": [{"anyKey": "anyValue"}], "replicasets": [{"anyKey": "anyValue"}], "statefulsets": [{"anyKey": "anyValue"}], "pods": [{"anyKey": "anyValue"}], "errors": [{"anyKey": "anyValue"}]}, "content_mgmt": {"pods": [{"anyKey": "anyValue"}], "errors": [{"anyKey": "anyValue"}]}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.get_preinstall(
            version_loc_id,
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_get_preinstall_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_preinstall_value_error(self):
        """Verify get_preinstall raises ValueError when any required param is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/preinstall')
        mock_response = '{"metadata": {"cluster_id": "cluster_id", "region": "region", "namespace": "namespace", "workspace_id": "workspace_id", "workspace_name": "workspace_name"}, "release": {"deployments": [{"anyKey": "anyValue"}], "replicasets": [{"anyKey": "anyValue"}], "statefulsets": [{"anyKey": "anyValue"}], "pods": [{"anyKey": "anyValue"}], "errors": [{"anyKey": "anyValue"}]}, "content_mgmt": {"pods": [{"anyKey": "anyValue"}], "errors": [{"anyKey": "anyValue"}]}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict.keys():
            # Equality comparison (!=) instead of identity (is not) on strings.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_preinstall(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for validation_install
#-----------------------------------------------------------------------------
class TestValidationInstall():
    """Tests for validation_install (POST /versions/{id}/validation/install)."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # validation_install()
    #--------------------------------------------------------
    @responses.activate
    def test_validation_install_all_params(self):
        """Invoke validation_install with every parameter and validate the JSON body."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/validation/install')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Construct a dict representation of a DeployRequestBodySchematics model
        deploy_request_body_schematics_model = {}
        deploy_request_body_schematics_model['name'] = 'testString'
        deploy_request_body_schematics_model['description'] = 'testString'
        deploy_request_body_schematics_model['tags'] = ['testString']
        deploy_request_body_schematics_model['resource_group_id'] = 'testString'
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        namespace = 'testString'
        override_values = {'foo': 'bar'}
        entitlement_apikey = 'testString'
        schematics = deploy_request_body_schematics_model
        script = 'testString'
        script_id = 'testString'
        version_locator_id = 'testString'
        vcenter_id = 'testString'
        vcenter_password = 'testString'
        vcenter_location = 'testString'
        # Invoke method
        response = service.validation_install(
            version_loc_id,
            x_auth_refresh_token,
            cluster_id=cluster_id,
            region=region,
            namespace=namespace,
            override_values=override_values,
            entitlement_apikey=entitlement_apikey,
            schematics=schematics,
            script=script,
            script_id=script_id,
            version_locator_id=version_locator_id,
            vcenter_id=vcenter_id,
            vcenter_password=vcenter_password,
            vcenter_location=vcenter_location,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['cluster_id'] == 'testString'
        assert req_body['region'] == 'testString'
        assert req_body['namespace'] == 'testString'
        assert req_body['override_values'] == {'foo': 'bar'}
        assert req_body['entitlement_apikey'] == 'testString'
        assert req_body['schematics'] == deploy_request_body_schematics_model
        assert req_body['script'] == 'testString'
        assert req_body['script_id'] == 'testString'
        assert req_body['version_locator_id'] == 'testString'
        assert req_body['vcenter_id'] == 'testString'
        assert req_body['vcenter_password'] == 'testString'
        assert req_body['vcenter_location'] == 'testString'

    #--------------------------------------------------------
    # test_validation_install_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_validation_install_required_params(self):
        """Invoke validation_install with only the required parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/validation/install')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.validation_install(
            version_loc_id,
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    #--------------------------------------------------------
    # test_validation_install_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_validation_install_value_error(self):
        """Verify validation_install raises ValueError when any required param is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/validation/install')
        responses.add(responses.POST,
                      url,
                      status=202)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict.keys():
            # Equality comparison (!=) instead of identity (is not) on strings.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.validation_install(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_validation_status
#-----------------------------------------------------------------------------
class TestGetValidationStatus():
    """Tests for get_validation_status (GET /versions/{id}/validation/install)."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_validation_status()
    #--------------------------------------------------------
    @responses.activate
    def test_get_validation_status_all_params(self):
        """Invoke get_validation_status with all parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/validation/install')
        mock_response = '{"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.get_validation_status(
            version_loc_id,
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_get_validation_status_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_validation_status_value_error(self):
        """Verify get_validation_status raises ValueError when any required param is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/validation/install')
        mock_response = '{"validated": "2019-01-01T12:00:00", "requested": "2019-01-01T12:00:00", "state": "state", "last_operation": "last_operation", "target": {"anyKey": "anyValue"}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict.keys():
            # Equality comparison (!=) instead of identity (is not) on strings.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_validation_status(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_override_values
#-----------------------------------------------------------------------------
class TestGetOverrideValues():
    """Tests for get_override_values (GET /versions/{id}/validation/overridevalues)."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_override_values()
    #--------------------------------------------------------
    @responses.activate
    def test_get_override_values_all_params(self):
        """Invoke get_override_values with all parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/validation/overridevalues')
        mock_response = '{"mapKey": {"anyKey": "anyValue"}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        # Invoke method
        response = service.get_override_values(
            version_loc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_get_override_values_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_override_values_value_error(self):
        """Verify get_override_values raises ValueError when the required param is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/validation/overridevalues')
        mock_response = '{"mapKey": {"anyKey": "anyValue"}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
        }
        for param in req_param_dict.keys():
            # Equality comparison (!=) instead of identity (is not) on strings.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_override_values(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_schematics_workspaces
#-----------------------------------------------------------------------------
class TestGetSchematicsWorkspaces():
    """Unit tests for the get_schematics_workspaces operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_schematics_workspaces()
    #--------------------------------------------------------
    @responses.activate
    def test_get_schematics_workspaces_all_params(self):
        """get_schematics_workspaces() succeeds with all parameters supplied."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/workspaces')
        mock_response = '{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": [{"id": "id", "name": "name", "type": ["type"], "description": "description", "tags": ["tags"], "created_at": "2019-01-01T12:00:00", "created_by": "created_by", "status": "status", "workspace_status": {"frozen": true, "locked": true}, "template_ref": "template_ref", "template_repo": {"repo_url": "repo_url", "chart_name": "chart_name", "script_name": "script_name", "uninstall_script_name": "uninstall_script_name", "folder_name": "folder_name", "repo_sha_value": "repo_sha_value"}, "template_data": [{"anyKey": "anyValue"}], "runtime_data": {"id": "id", "engine_name": "engine_name", "engine_version": "engine_version", "state_store_url": "state_store_url", "log_store_url": "log_store_url"}, "shared_data": {"anyKey": "anyValue"}, "catalog_ref": {"item_id": "item_id", "item_name": "item_name", "item_url": "item_url"}}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Invoke method
        response = service.get_schematics_workspaces(
            version_loc_id,
            x_auth_refresh_token,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_get_schematics_workspaces_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_schematics_workspaces_value_error(self):
        """get_schematics_workspaces() raises ValueError when a required parameter is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/workspaces')
        mock_response = '{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": [{"id": "id", "name": "name", "type": ["type"], "description": "description", "tags": ["tags"], "created_at": "2019-01-01T12:00:00", "created_by": "created_by", "status": "status", "workspace_status": {"frozen": true, "locked": true}, "template_ref": "template_ref", "template_repo": {"repo_url": "repo_url", "chart_name": "chart_name", "script_name": "script_name", "uninstall_script_name": "uninstall_script_name", "folder_name": "folder_name", "repo_sha_value": "repo_sha_value"}, "template_data": [{"anyKey": "anyValue"}], "runtime_data": {"id": "id", "engine_name": "engine_name", "engine_version": "engine_version", "state_store_url": "state_store_url", "log_store_url": "log_store_url"}, "shared_data": {"anyKey": "anyValue"}, "catalog_ref": {"item_id": "item_id", "item_name": "item_name", "item_url": "item_url"}}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        x_auth_refresh_token = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
            "x_auth_refresh_token": x_auth_refresh_token,
        }
        for param in req_param_dict.keys():
            # Fix: compare keys by equality (==), not identity (is), which is
            # not guaranteed to hold for equal strings.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_schematics_workspaces(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for can_deploy_schematics
#-----------------------------------------------------------------------------
class TestCanDeploySchematics():
    """Unit tests for the can_deploy_schematics operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # can_deploy_schematics()
    #--------------------------------------------------------
    @responses.activate
    def test_can_deploy_schematics_all_params(self):
        """can_deploy_schematics() sends every optional query parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/candeploy')
        mock_response = '{"pre_install": {"anyKey": "anyValue"}, "install": {"anyKey": "anyValue"}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        namespace = 'testString'
        resource_group_id = 'testString'
        # Invoke method
        response = service.can_deploy_schematics(
            version_loc_id,
            cluster_id,
            region,
            namespace=namespace,
            resource_group_id=resource_group_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'cluster_id={}'.format(cluster_id) in query_string
        assert 'region={}'.format(region) in query_string
        assert 'namespace={}'.format(namespace) in query_string
        assert 'resource_group_id={}'.format(resource_group_id) in query_string

    #--------------------------------------------------------
    # test_can_deploy_schematics_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_can_deploy_schematics_required_params(self):
        """can_deploy_schematics() works with only the required parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/candeploy')
        mock_response = '{"pre_install": {"anyKey": "anyValue"}, "install": {"anyKey": "anyValue"}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        # Invoke method
        response = service.can_deploy_schematics(
            version_loc_id,
            cluster_id,
            region,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'cluster_id={}'.format(cluster_id) in query_string
        assert 'region={}'.format(region) in query_string

    #--------------------------------------------------------
    # test_can_deploy_schematics_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_can_deploy_schematics_value_error(self):
        """can_deploy_schematics() raises ValueError when a required parameter is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/versions/testString/candeploy')
        mock_response = '{"pre_install": {"anyKey": "anyValue"}, "install": {"anyKey": "anyValue"}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        version_loc_id = 'testString'
        cluster_id = 'testString'
        region = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "version_loc_id": version_loc_id,
            "cluster_id": cluster_id,
            "region": region,
        }
        for param in req_param_dict.keys():
            # Fix: compare keys by equality (==), not identity (is), which is
            # not guaranteed to hold for equal strings.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.can_deploy_schematics(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_resource_groups
#-----------------------------------------------------------------------------
class TestGetResourceGroups():
    """Unit tests for the get_resource_groups operation."""

    def preprocess_url(self, request_url: str):
        """Return the URL unchanged, or as a trailing-slash-tolerant pattern."""
        # A URL ending in '/' must be turned into a regex so the mock matches.
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    #--------------------------------------------------------
    # get_resource_groups()
    #--------------------------------------------------------
    @responses.activate
    def test_get_resource_groups_all_params(self):
        """get_resource_groups() makes exactly one call and returns 200."""
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(base_url + '/deploy/schematics/resourcegroups')
        responses.add(
            responses.GET,
            mock_url,
            body='{"offset": 6, "limit": 5, "total_count": 11, "resource_count": 14, "first": "first", "last": "last", "prev": "prev", "next": "next", "resources": [{"id": "id", "name": "name", "crn": "crn", "account_id": "account_id", "state": "state", "default": false}]}',
            content_type='application/json',
            status=200,
        )
        # Invoke the operation and verify the outcome.
        result = service.get_resource_groups()
        assert len(responses.calls) == 1
        assert result.status_code == 200
# endregion
##############################################################################
# End of Service: Deploy
##############################################################################
##############################################################################
# Start of Service: Licensing
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for get_license_providers
#-----------------------------------------------------------------------------
class TestGetLicenseProviders():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# get_license_providers()
#--------------------------------------------------------
@responses.activate
def test_get_license_providers_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/license/license_providers')
mock_response = '{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "short_description": "short_description", "id": "id", "licence_type": "licence_type", "offering_type": "offering_type", "create_url": "create_url", "info_url": "info_url", "url": "url", "crn": "crn", "state": "state"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.get_license_providers()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for list_license_entitlements
#-----------------------------------------------------------------------------
class TestListLicenseEntitlements():
    """Unit tests for the list_license_entitlements operation."""

    def preprocess_url(self, request_url: str):
        """Return the URL unchanged, or as a trailing-slash-tolerant pattern."""
        # A URL ending in '/' must be turned into a regex so the mock matches.
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    #--------------------------------------------------------
    # list_license_entitlements()
    #--------------------------------------------------------
    @responses.activate
    def test_list_license_entitlements_all_params(self):
        """list_license_entitlements() forwards every filter as a query parameter."""
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(base_url + '/license/entitlements')
        responses.add(
            responses.GET,
            mock_url,
            body='{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "id": "id", "crn": "crn", "url": "url", "offering_type": "offering_type", "state": "state", "effective_from": "effective_from", "effective_until": "effective_until", "account_id": "account_id", "owner_id": "owner_id", "version_id": "version_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_provider_url": "license_provider_url", "license_product_id": "license_product_id", "namespace_repository": "namespace_repository", "apikey": "apikey", "create_by": "create_by", "update_by": "update_by", "create_at": "create_at", "updated_at": "updated_at", "history": [{"action": "action", "user": "user", "date": "date"}], "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}]}',
            content_type='application/json',
            status=200,
        )
        # Every filter shares the same placeholder value.
        account_id = license_product_id = version_id = state = 'testString'
        # Invoke the operation.
        result = service.list_license_entitlements(
            account_id=account_id,
            license_product_id=license_product_id,
            version_id=version_id,
            state=state,
            headers={}
        )
        assert len(responses.calls) == 1
        assert result.status_code == 200
        # Each filter must appear in the request's query string.
        sent_query = requests.utils.unquote(responses.calls[0].request.url.split('?', 1)[1])
        for field, value in (('account_id', account_id),
                             ('license_product_id', license_product_id),
                             ('version_id', version_id),
                             ('state', state)):
            assert '{}={}'.format(field, value) in sent_query

    #--------------------------------------------------------
    # test_list_license_entitlements_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_list_license_entitlements_required_params(self):
        """list_license_entitlements() works with no arguments at all."""
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(base_url + '/license/entitlements')
        responses.add(
            responses.GET,
            mock_url,
            body='{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "id": "id", "crn": "crn", "url": "url", "offering_type": "offering_type", "state": "state", "effective_from": "effective_from", "effective_until": "effective_until", "account_id": "account_id", "owner_id": "owner_id", "version_id": "version_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_provider_url": "license_provider_url", "license_product_id": "license_product_id", "namespace_repository": "namespace_repository", "apikey": "apikey", "create_by": "create_by", "update_by": "update_by", "create_at": "create_at", "updated_at": "updated_at", "history": [{"action": "action", "user": "user", "date": "date"}], "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}]}',
            content_type='application/json',
            status=200,
        )
        # Invoke the operation and verify the outcome.
        result = service.list_license_entitlements()
        assert len(responses.calls) == 1
        assert result.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for create_license_entitlement
#-----------------------------------------------------------------------------
class TestCreateLicenseEntitlement():
    """Unit tests for the create_license_entitlement operation."""

    def preprocess_url(self, request_url: str):
        """Return the URL unchanged, or as a trailing-slash-tolerant pattern."""
        # A URL ending in '/' must be turned into a regex so the mock matches.
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    #--------------------------------------------------------
    # create_license_entitlement()
    #--------------------------------------------------------
    @responses.activate
    def test_create_license_entitlement_all_params(self):
        """create_license_entitlement() sends body fields and the account_id query param."""
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(base_url + '/license/entitlements')
        responses.add(
            responses.POST,
            mock_url,
            body='{"name": "name", "id": "id", "crn": "crn", "url": "url", "offering_type": "offering_type", "state": "state", "effective_from": "effective_from", "effective_until": "effective_until", "account_id": "account_id", "owner_id": "owner_id", "version_id": "version_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_provider_url": "license_provider_url", "license_product_id": "license_product_id", "namespace_repository": "namespace_repository", "apikey": "apikey", "create_by": "create_by", "update_by": "update_by", "create_at": "create_at", "updated_at": "updated_at", "history": [{"action": "action", "user": "user", "date": "date"}], "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}',
            content_type='application/json',
            status=200,
        )
        # All inputs share the same placeholder value.
        value = 'testString'
        # Invoke the operation.
        result = service.create_license_entitlement(
            name=value,
            effective_from=value,
            effective_until=value,
            version_id=value,
            license_id=value,
            license_owner_id=value,
            license_provider_id=value,
            license_product_id=value,
            account_id=value,
            headers={}
        )
        assert len(responses.calls) == 1
        assert result.status_code == 200
        # account_id travels as a query parameter.
        sent_query = requests.utils.unquote(responses.calls[0].request.url.split('?', 1)[1])
        assert 'account_id={}'.format(value) in sent_query
        # The remaining inputs travel in the JSON request body.
        sent_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        for field in ('name', 'effective_from', 'effective_until', 'version_id',
                      'license_id', 'license_owner_id', 'license_provider_id',
                      'license_product_id'):
            assert sent_body[field] == 'testString'

    #--------------------------------------------------------
    # test_create_license_entitlement_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_create_license_entitlement_required_params(self):
        """create_license_entitlement() works with no arguments at all."""
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(base_url + '/license/entitlements')
        responses.add(
            responses.POST,
            mock_url,
            body='{"name": "name", "id": "id", "crn": "crn", "url": "url", "offering_type": "offering_type", "state": "state", "effective_from": "effective_from", "effective_until": "effective_until", "account_id": "account_id", "owner_id": "owner_id", "version_id": "version_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_provider_url": "license_provider_url", "license_product_id": "license_product_id", "namespace_repository": "namespace_repository", "apikey": "apikey", "create_by": "create_by", "update_by": "update_by", "create_at": "create_at", "updated_at": "updated_at", "history": [{"action": "action", "user": "user", "date": "date"}], "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}',
            content_type='application/json',
            status=200,
        )
        # Invoke the operation and verify the outcome.
        result = service.create_license_entitlement()
        assert len(responses.calls) == 1
        assert result.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for get_license_entitlements
#-----------------------------------------------------------------------------
class TestGetLicenseEntitlements():
    """Unit tests for the get_license_entitlements operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # get_license_entitlements()
    #--------------------------------------------------------
    @responses.activate
    def test_get_license_entitlements_all_params(self):
        """get_license_entitlements() sends the optional query parameters."""
        # Set up mock
        url = self.preprocess_url(base_url + '/license/entitlements/productID/testString')
        mock_response = '{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "id": "id", "crn": "crn", "url": "url", "offering_type": "offering_type", "state": "state", "effective_from": "effective_from", "effective_until": "effective_until", "account_id": "account_id", "owner_id": "owner_id", "version_id": "version_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_provider_url": "license_provider_url", "license_product_id": "license_product_id", "namespace_repository": "namespace_repository", "apikey": "apikey", "create_by": "create_by", "update_by": "update_by", "create_at": "create_at", "updated_at": "updated_at", "history": [{"action": "action", "user": "user", "date": "date"}], "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        license_product_id = 'testString'
        account_id = 'testString'
        version_id = 'testString'
        # Invoke method
        response = service.get_license_entitlements(
            license_product_id,
            account_id=account_id,
            version_id=version_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'account_id={}'.format(account_id) in query_string
        assert 'version_id={}'.format(version_id) in query_string

    #--------------------------------------------------------
    # test_get_license_entitlements_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_get_license_entitlements_required_params(self):
        """get_license_entitlements() works with only the required parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/license/entitlements/productID/testString')
        mock_response = '{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "id": "id", "crn": "crn", "url": "url", "offering_type": "offering_type", "state": "state", "effective_from": "effective_from", "effective_until": "effective_until", "account_id": "account_id", "owner_id": "owner_id", "version_id": "version_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_provider_url": "license_provider_url", "license_product_id": "license_product_id", "namespace_repository": "namespace_repository", "apikey": "apikey", "create_by": "create_by", "update_by": "update_by", "create_at": "create_at", "updated_at": "updated_at", "history": [{"action": "action", "user": "user", "date": "date"}], "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        license_product_id = 'testString'
        # Invoke method
        response = service.get_license_entitlements(
            license_product_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_get_license_entitlements_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_get_license_entitlements_value_error(self):
        """get_license_entitlements() raises ValueError when a required parameter is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/license/entitlements/productID/testString')
        mock_response = '{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "id": "id", "crn": "crn", "url": "url", "offering_type": "offering_type", "state": "state", "effective_from": "effective_from", "effective_until": "effective_until", "account_id": "account_id", "owner_id": "owner_id", "version_id": "version_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_provider_url": "license_provider_url", "license_product_id": "license_product_id", "namespace_repository": "namespace_repository", "apikey": "apikey", "create_by": "create_by", "update_by": "update_by", "create_at": "create_at", "updated_at": "updated_at", "history": [{"action": "action", "user": "user", "date": "date"}], "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        license_product_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "license_product_id": license_product_id,
        }
        for param in req_param_dict.keys():
            # Fix: compare keys by equality (==), not identity (is), which is
            # not guaranteed to hold for equal strings.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.get_license_entitlements(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for delete_license_entitlement
#-----------------------------------------------------------------------------
class TestDeleteLicenseEntitlement():
    """Unit tests for the delete_license_entitlement operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL to ensure the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # delete_license_entitlement()
    #--------------------------------------------------------
    @responses.activate
    def test_delete_license_entitlement_all_params(self):
        """delete_license_entitlement() sends the optional account_id query parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/license/entitlements/testString')
        responses.add(responses.DELETE,
                      url,
                      status=200)
        # Set up parameter values
        entitlement_id = 'testString'
        account_id = 'testString'
        # Invoke method
        response = service.delete_license_entitlement(
            entitlement_id,
            account_id=account_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'account_id={}'.format(account_id) in query_string

    #--------------------------------------------------------
    # test_delete_license_entitlement_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_delete_license_entitlement_required_params(self):
        """delete_license_entitlement() works with only the required parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/license/entitlements/testString')
        responses.add(responses.DELETE,
                      url,
                      status=200)
        # Set up parameter values
        entitlement_id = 'testString'
        # Invoke method
        response = service.delete_license_entitlement(
            entitlement_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_delete_license_entitlement_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_delete_license_entitlement_value_error(self):
        """delete_license_entitlement() raises ValueError when a required parameter is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/license/entitlements/testString')
        responses.add(responses.DELETE,
                      url,
                      status=200)
        # Set up parameter values
        entitlement_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "entitlement_id": entitlement_id,
        }
        for param in req_param_dict.keys():
            # Fix: compare keys by equality (==), not identity (is), which is
            # not guaranteed to hold for equal strings.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.delete_license_entitlement(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_licenses
#-----------------------------------------------------------------------------
class TestGetLicenses():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
    """Return the URL unchanged, or as a trailing-slash-tolerant regex pattern.

    A registered mock whose URL ends in '/' would otherwise fail to match,
    so such URLs are converted to a compiled pattern tolerating extra slashes.
    """
    if re.fullmatch('.*/+', request_url) is not None:
        return re.compile(request_url.rstrip('/') + '/+')
    return request_url
#--------------------------------------------------------
# get_licenses()
#--------------------------------------------------------
@responses.activate
def test_get_licenses_all_params(self):
    """get_licenses() forwards every filter as a query parameter."""
    # Register the mocked endpoint.
    mock_url = self.preprocess_url(base_url + '/license/licenses')
    responses.add(
        responses.GET,
        mock_url,
        body='{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "offering_type": "offering_type", "seats_allowed": "seats_allowed", "seats_used": "seats_used", "owner_id": "owner_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_product_id": "license_product_id", "license_provider_url": "license_provider_url", "effective_from": "effective_from", "effective_until": "effective_until", "internal": true, "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}]}',
        content_type='application/json',
        status=200,
    )
    # All filters share the same placeholder value.
    license_provider_id = account_id = name = license_type = license_product_id = 'testString'
    # Invoke the operation.
    result = service.get_licenses(
        license_provider_id,
        account_id=account_id,
        name=name,
        license_type=license_type,
        license_product_id=license_product_id,
        headers={}
    )
    assert len(responses.calls) == 1
    assert result.status_code == 200
    # Each filter must appear in the request's query string.
    sent_query = requests.utils.unquote(responses.calls[0].request.url.split('?', 1)[1])
    for field, value in (('license_provider_id', license_provider_id),
                         ('account_id', account_id),
                         ('name', name),
                         ('license_type', license_type),
                         ('license_product_id', license_product_id)):
        assert '{}={}'.format(field, value) in sent_query
#--------------------------------------------------------
# test_get_licenses_required_params()
#--------------------------------------------------------
@responses.activate
def test_get_licenses_required_params(self):
    """get_licenses() works with only the required license_provider_id."""
    # Register the mocked endpoint.
    mock_url = self.preprocess_url(base_url + '/license/licenses')
    responses.add(
        responses.GET,
        mock_url,
        body='{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "offering_type": "offering_type", "seats_allowed": "seats_allowed", "seats_used": "seats_used", "owner_id": "owner_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_product_id": "license_product_id", "license_provider_url": "license_provider_url", "effective_from": "effective_from", "effective_until": "effective_until", "internal": true, "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}]}',
        content_type='application/json',
        status=200,
    )
    # Invoke the operation with only the required parameter.
    license_provider_id = 'testString'
    result = service.get_licenses(
        license_provider_id,
        headers={}
    )
    assert len(responses.calls) == 1
    assert result.status_code == 200
    # The required filter must appear in the request's query string.
    sent_query = requests.utils.unquote(responses.calls[0].request.url.split('?', 1)[1])
    assert 'license_provider_id={}'.format(license_provider_id) in sent_query
#--------------------------------------------------------
# test_get_licenses_value_error()
#--------------------------------------------------------
@responses.activate
def test_get_licenses_value_error(self):
    """get_licenses() raises ValueError when a required parameter is None."""
    # Set up mock (never actually hit: the client validates before sending).
    url = self.preprocess_url(base_url + '/license/licenses')
    mock_response = '{"total_results": 13, "total_pages": 11, "prev_url": "prev_url", "next_url": "next_url", "resources": [{"name": "name", "offering_type": "offering_type", "seats_allowed": "seats_allowed", "seats_used": "seats_used", "owner_id": "owner_id", "license_offering_id": "license_offering_id", "license_id": "license_id", "license_owner_id": "license_owner_id", "license_type": "license_type", "license_provider_id": "license_provider_id", "license_product_id": "license_product_id", "license_provider_url": "license_provider_url", "effective_from": "effective_from", "effective_until": "effective_until", "internal": true, "offering_list": [{"id": "id", "name": "name", "label": "label", "offering_icon_url": "offering_icon_url", "account_id": "account_id", "catalog_id": "catalog_id"}]}]}'
    responses.add(responses.GET,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=200)
    # Set up parameter values
    license_provider_id = 'testString'
    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "license_provider_id": license_provider_id,
    }
    for param in req_param_dict.keys():
        # FIX: compare strings with != (equality), not 'is not' (identity);
        # the identity check only worked by accident of CPython interning.
        req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            service.get_licenses(**req_copy)
# endregion
##############################################################################
# End of Service: Licensing
##############################################################################
##############################################################################
# Start of Service: CrossAccountSearch
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for search_license_versions
#-----------------------------------------------------------------------------
class TestSearchLicenseVersions():
    """Unit tests for the search_license_versions operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        URLs ending in one or more slashes are turned into a regex that
        tolerates trailing slashes; others are returned unchanged.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # search_license_versions()
    #--------------------------------------------------------
    @responses.activate
    def test_search_license_versions_all_params(self):
        """search_license_versions() sends the q query parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/search/license/versions')
        responses.add(responses.GET,
                      url,
                      status=200)
        # Set up parameter values
        q = 'testString'
        # Invoke method
        response = service.search_license_versions(
            q,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'q={}'.format(q) in query_string

    #--------------------------------------------------------
    # test_search_license_versions_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_search_license_versions_value_error(self):
        """search_license_versions() raises ValueError when q is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/search/license/versions')
        responses.add(responses.GET,
                      url,
                      status=200)
        # Set up parameter values
        q = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "q": q,
        }
        for param in req_param_dict.keys():
            # FIX: compare strings with != (equality), not 'is not' (identity);
            # identity on str is an interning artifact, not a contract.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.search_license_versions(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for search_license_offerings
#-----------------------------------------------------------------------------
class TestSearchLicenseOfferings():
    """Unit tests for the search_license_offerings operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found.

        URLs ending in one or more slashes are turned into a regex that
        tolerates trailing slashes; others are returned unchanged.
        """
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # search_license_offerings()
    #--------------------------------------------------------
    @responses.activate
    def test_search_license_offerings_all_params(self):
        """search_license_offerings() sends the q query parameter."""
        # Set up mock
        url = self.preprocess_url(base_url + '/search/license/offerings')
        responses.add(responses.GET,
                      url,
                      status=200)
        # Set up parameter values
        q = 'testString'
        # Invoke method
        response = service.search_license_offerings(
            q,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = requests.utils.unquote(query_string)
        assert 'q={}'.format(q) in query_string

    #--------------------------------------------------------
    # test_search_license_offerings_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_search_license_offerings_value_error(self):
        """search_license_offerings() raises ValueError when q is None."""
        # Set up mock
        url = self.preprocess_url(base_url + '/search/license/offerings')
        responses.add(responses.GET,
                      url,
                      status=200)
        # Set up parameter values
        q = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "q": q,
        }
        for param in req_param_dict.keys():
            # FIX: compare strings with != (equality), not 'is not' (identity);
            # identity on str is an interning artifact, not a contract.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                service.search_license_offerings(**req_copy)
# endregion
##############################################################################
# End of Service: CrossAccountSearch
##############################################################################
##############################################################################
# Start of Model Tests
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for Account
#-----------------------------------------------------------------------------
class TestAccount():
    """Serialization/deserialization tests for the Account model."""

    def test_account_serialization(self):
        """Round-trip an Account through from_dict/to_dict without data loss."""
        # Nested model dicts needed to build the Account.
        filter_terms_model = {'filter_terms': ['testString']}  # FilterTerms
        category_filter_model = {  # CategoryFilter (built but unused; mirrors generated fixture)
            'include': True,
            'filter': filter_terms_model,
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        filters_model = {  # Filters
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        # JSON representation of an Account model.
        account_model_json = {
            'id': 'testString',
            'account_filters': filters_model,
        }
        # from_dict must produce a truthy model instance.
        account_model = Account.from_dict(account_model_json)
        assert account_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        account_model2 = Account(**Account.from_dict(account_model_json).__dict__)
        assert account_model == account_model2
        # Converting back to a dict must be lossless.
        assert account_model.to_dict() == account_model_json
#-----------------------------------------------------------------------------
# Test Class for AccountGroup
#-----------------------------------------------------------------------------
class TestAccountGroup():
    """Serialization/deserialization tests for the AccountGroup model."""

    def test_account_group_serialization(self):
        """Round-trip an AccountGroup through from_dict/to_dict without data loss."""
        # Nested model dicts needed to build the AccountGroup.
        filter_terms_model = {'filter_terms': ['testString']}  # FilterTerms
        category_filter_model = {  # CategoryFilter (built but unused; mirrors generated fixture)
            'include': True,
            'filter': filter_terms_model,
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        filters_model = {  # Filters
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        # JSON representation of an AccountGroup model.
        account_group_model_json = {
            'id': 'testString',
            'account_filters': filters_model,
        }
        # from_dict must produce a truthy model instance.
        account_group_model = AccountGroup.from_dict(account_group_model_json)
        assert account_group_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        account_group_model2 = AccountGroup(**AccountGroup.from_dict(account_group_model_json).__dict__)
        assert account_group_model == account_group_model2
        # Converting back to a dict must be lossless.
        assert account_group_model.to_dict() == account_group_model_json
#-----------------------------------------------------------------------------
# Test Class for AccumulatedFilters
#-----------------------------------------------------------------------------
class TestAccumulatedFilters():
    """Serialization/deserialization tests for the AccumulatedFilters model."""

    def test_accumulated_filters_serialization(self):
        """Round-trip an AccumulatedFilters through from_dict/to_dict without data loss."""
        # Nested model dicts needed to build the AccumulatedFilters.
        filter_terms_model = {'filter_terms': ['testString']}  # FilterTerms
        category_filter_model = {  # CategoryFilter (built but unused; mirrors generated fixture)
            'include': True,
            'filter': filter_terms_model,
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        accumulated_filters_catalog_filters_item_catalog_model = {  # AccumulatedFiltersCatalogFiltersItemCatalog
            'id': 'testString',
            'name': 'testString',
        }
        filters_model = {  # Filters
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        accumulated_filters_catalog_filters_item_model = {  # AccumulatedFiltersCatalogFiltersItem
            'catalog': accumulated_filters_catalog_filters_item_catalog_model,
            'filters': filters_model,
        }
        # JSON representation of an AccumulatedFilters model.
        accumulated_filters_model_json = {
            'account_filters': [filters_model],
            'catalog_filters': [accumulated_filters_catalog_filters_item_model],
        }
        # from_dict must produce a truthy model instance.
        accumulated_filters_model = AccumulatedFilters.from_dict(accumulated_filters_model_json)
        assert accumulated_filters_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        accumulated_filters_model2 = AccumulatedFilters(**AccumulatedFilters.from_dict(accumulated_filters_model_json).__dict__)
        assert accumulated_filters_model == accumulated_filters_model2
        # Converting back to a dict must be lossless.
        assert accumulated_filters_model.to_dict() == accumulated_filters_model_json
#-----------------------------------------------------------------------------
# Test Class for AccumulatedFiltersCatalogFiltersItem
#-----------------------------------------------------------------------------
class TestAccumulatedFiltersCatalogFiltersItem():
    """Serialization/deserialization tests for the AccumulatedFiltersCatalogFiltersItem model."""

    def test_accumulated_filters_catalog_filters_item_serialization(self):
        """Round-trip an AccumulatedFiltersCatalogFiltersItem through from_dict/to_dict."""
        # Nested model dicts needed to build the item.
        filter_terms_model = {'filter_terms': ['testString']}  # FilterTerms
        category_filter_model = {  # CategoryFilter (built but unused; mirrors generated fixture)
            'include': True,
            'filter': filter_terms_model,
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        accumulated_filters_catalog_filters_item_catalog_model = {  # AccumulatedFiltersCatalogFiltersItemCatalog
            'id': 'testString',
            'name': 'testString',
        }
        filters_model = {  # Filters
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        # JSON representation of an AccumulatedFiltersCatalogFiltersItem model.
        accumulated_filters_catalog_filters_item_model_json = {
            'catalog': accumulated_filters_catalog_filters_item_catalog_model,
            'filters': filters_model,
        }
        # from_dict must produce a truthy model instance.
        accumulated_filters_catalog_filters_item_model = AccumulatedFiltersCatalogFiltersItem.from_dict(accumulated_filters_catalog_filters_item_model_json)
        assert accumulated_filters_catalog_filters_item_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        accumulated_filters_catalog_filters_item_model2 = AccumulatedFiltersCatalogFiltersItem(**AccumulatedFiltersCatalogFiltersItem.from_dict(accumulated_filters_catalog_filters_item_model_json).__dict__)
        assert accumulated_filters_catalog_filters_item_model == accumulated_filters_catalog_filters_item_model2
        # Converting back to a dict must be lossless.
        assert accumulated_filters_catalog_filters_item_model.to_dict() == accumulated_filters_catalog_filters_item_model_json
#-----------------------------------------------------------------------------
# Test Class for AccumulatedFiltersCatalogFiltersItemCatalog
#-----------------------------------------------------------------------------
class TestAccumulatedFiltersCatalogFiltersItemCatalog():
    """Serialization/deserialization tests for the AccumulatedFiltersCatalogFiltersItemCatalog model."""

    def test_accumulated_filters_catalog_filters_item_catalog_serialization(self):
        """Round-trip the catalog sub-model through from_dict/to_dict without data loss."""
        # JSON representation of an AccumulatedFiltersCatalogFiltersItemCatalog model.
        accumulated_filters_catalog_filters_item_catalog_model_json = {
            'id': 'testString',
            'name': 'testString',
        }
        # from_dict must produce a truthy model instance.
        accumulated_filters_catalog_filters_item_catalog_model = AccumulatedFiltersCatalogFiltersItemCatalog.from_dict(accumulated_filters_catalog_filters_item_catalog_model_json)
        assert accumulated_filters_catalog_filters_item_catalog_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        accumulated_filters_catalog_filters_item_catalog_model2 = AccumulatedFiltersCatalogFiltersItemCatalog(**AccumulatedFiltersCatalogFiltersItemCatalog.from_dict(accumulated_filters_catalog_filters_item_catalog_model_json).__dict__)
        assert accumulated_filters_catalog_filters_item_catalog_model == accumulated_filters_catalog_filters_item_catalog_model2
        # Converting back to a dict must be lossless.
        assert accumulated_filters_catalog_filters_item_catalog_model.to_dict() == accumulated_filters_catalog_filters_item_catalog_model_json
#-----------------------------------------------------------------------------
# Test Class for ApprovalResult
#-----------------------------------------------------------------------------
class TestApprovalResult():
    """Serialization/deserialization tests for the ApprovalResult model."""

    def test_approval_result_serialization(self):
        """Round-trip an ApprovalResult through from_dict/to_dict without data loss."""
        # JSON representation of an ApprovalResult model.
        approval_result_model_json = {
            'ibm': True,
            'public': True,
            'changed': True,
        }
        # from_dict must produce a truthy model instance.
        approval_result_model = ApprovalResult.from_dict(approval_result_model_json)
        assert approval_result_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        approval_result_model2 = ApprovalResult(**ApprovalResult.from_dict(approval_result_model_json).__dict__)
        assert approval_result_model == approval_result_model2
        # Converting back to a dict must be lossless.
        assert approval_result_model.to_dict() == approval_result_model_json
#-----------------------------------------------------------------------------
# Test Class for Catalog
#-----------------------------------------------------------------------------
class TestCatalog():
    """Serialization/deserialization tests for the Catalog model."""

    def test_catalog_serialization(self):
        """Round-trip a Catalog through from_dict/to_dict without data loss."""
        # Nested model dicts needed to build the Catalog.
        filter_terms_model = {'filter_terms': ['testString']}  # FilterTerms
        syndication_cluster_model = {  # SyndicationCluster
            'region': 'testString',
            'id': 'testString',
            'name': 'testString',
            'resource_group_name': 'testString',
            'type': 'testString',
            'namespaces': ['testString'],
            'all_namespaces': True,
        }
        category_filter_model = {  # CategoryFilter (built but unused; mirrors generated fixture)
            'include': True,
            'filter': filter_terms_model,
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        syndication_authorization_model = {  # SyndicationAuthorization
            'token': 'testString',
            'last_run': '2020-01-28T18:40:40.123456Z',
        }
        syndication_history_model = {  # SyndicationHistory
            'namespaces': ['testString'],
            'clusters': [syndication_cluster_model],
            'last_run': '2020-01-28T18:40:40.123456Z',
        }
        feature_model = {  # Feature
            'title': 'testString',
            'description': 'testString',
        }
        filters_model = {  # Filters
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        syndication_resource_model = {  # SyndicationResource
            'remove_related_components': True,
            'clusters': [syndication_cluster_model],
            'history': syndication_history_model,
            'authorization': syndication_authorization_model,
        }
        # JSON representation of a Catalog model.
        catalog_model_json = {
            'id': 'testString',
            '_rev': 'testString',
            'label': 'testString',
            'short_description': 'testString',
            'catalog_icon_url': 'testString',
            'tags': ['testString'],
            'url': 'testString',
            'crn': 'testString',
            'offerings_url': 'testString',
            'features': [feature_model],
            'disabled': True,
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'resource_group_id': 'testString',
            'owning_account': 'testString',
            'catalog_filters': filters_model,
            'syndication_settings': syndication_resource_model,
        }
        # from_dict must produce a truthy model instance.
        catalog_model = Catalog.from_dict(catalog_model_json)
        assert catalog_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        catalog_model2 = Catalog(**Catalog.from_dict(catalog_model_json).__dict__)
        assert catalog_model == catalog_model2
        # Converting back to a dict must be lossless.
        assert catalog_model.to_dict() == catalog_model_json
#-----------------------------------------------------------------------------
# Test Class for CatalogSearchResult
#-----------------------------------------------------------------------------
class TestCatalogSearchResult():
    """Serialization/deserialization tests for the CatalogSearchResult model."""

    def test_catalog_search_result_serialization(self):
        """Round-trip a CatalogSearchResult through from_dict/to_dict without data loss."""
        # Nested model dicts needed to build the CatalogSearchResult.
        filter_terms_model = {'filter_terms': ['testString']}  # FilterTerms
        syndication_cluster_model = {  # SyndicationCluster
            'region': 'testString',
            'id': 'testString',
            'name': 'testString',
            'resource_group_name': 'testString',
            'type': 'testString',
            'namespaces': ['testString'],
            'all_namespaces': True,
        }
        category_filter_model = {  # CategoryFilter (built but unused; mirrors generated fixture)
            'include': True,
            'filter': filter_terms_model,
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        syndication_authorization_model = {  # SyndicationAuthorization
            'token': 'testString',
            'last_run': '2020-01-28T18:40:40.123456Z',
        }
        syndication_history_model = {  # SyndicationHistory
            'namespaces': ['testString'],
            'clusters': [syndication_cluster_model],
            'last_run': '2020-01-28T18:40:40.123456Z',
        }
        feature_model = {  # Feature
            'title': 'testString',
            'description': 'testString',
        }
        filters_model = {  # Filters
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        syndication_resource_model = {  # SyndicationResource
            'remove_related_components': True,
            'clusters': [syndication_cluster_model],
            'history': syndication_history_model,
            'authorization': syndication_authorization_model,
        }
        catalog_model = {  # Catalog
            'id': 'testString',
            '_rev': 'testString',
            'label': 'testString',
            'short_description': 'testString',
            'catalog_icon_url': 'testString',
            'tags': ['testString'],
            'url': 'testString',
            'crn': 'testString',
            'offerings_url': 'testString',
            'features': [feature_model],
            'disabled': True,
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'resource_group_id': 'testString',
            'owning_account': 'testString',
            'catalog_filters': filters_model,
            'syndication_settings': syndication_resource_model,
        }
        # JSON representation of a CatalogSearchResult model.
        catalog_search_result_model_json = {
            'offset': 38,
            'limit': 38,
            'total_count': 38,
            'resource_count': 38,
            'first': 'testString',
            'last': 'testString',
            'prev': 'testString',
            'next': 'testString',
            'resources': [catalog_model],
        }
        # from_dict must produce a truthy model instance.
        catalog_search_result_model = CatalogSearchResult.from_dict(catalog_search_result_model_json)
        assert catalog_search_result_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        catalog_search_result_model2 = CatalogSearchResult(**CatalogSearchResult.from_dict(catalog_search_result_model_json).__dict__)
        assert catalog_search_result_model == catalog_search_result_model2
        # Converting back to a dict must be lossless.
        assert catalog_search_result_model.to_dict() == catalog_search_result_model_json
#-----------------------------------------------------------------------------
# Test Class for CategoryFilter
#-----------------------------------------------------------------------------
class TestCategoryFilter():
    """Serialization/deserialization tests for the CategoryFilter model."""

    def test_category_filter_serialization(self):
        """Round-trip a CategoryFilter through from_dict/to_dict without data loss."""
        # Nested model dict needed to build the CategoryFilter.
        filter_terms_model = {'filter_terms': ['testString']}  # FilterTerms
        # JSON representation of a CategoryFilter model.
        category_filter_model_json = {
            'include': True,
            'filter': filter_terms_model,
        }
        # from_dict must produce a truthy model instance.
        category_filter_model = CategoryFilter.from_dict(category_filter_model_json)
        assert category_filter_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        category_filter_model2 = CategoryFilter(**CategoryFilter.from_dict(category_filter_model_json).__dict__)
        assert category_filter_model == category_filter_model2
        # Converting back to a dict must be lossless.
        assert category_filter_model.to_dict() == category_filter_model_json
#-----------------------------------------------------------------------------
# Test Class for ClusterInfo
#-----------------------------------------------------------------------------
class TestClusterInfo():
    """Serialization/deserialization tests for the ClusterInfo model."""

    def test_cluster_info_serialization(self):
        """Round-trip a ClusterInfo through from_dict/to_dict without data loss."""
        # JSON representation of a ClusterInfo model.
        cluster_info_model_json = {
            'resource_group_id': 'testString',
            'resource_group_name': 'testString',
            'id': 'testString',
            'name': 'testString',
            'region': 'testString',
        }
        # from_dict must produce a truthy model instance.
        cluster_info_model = ClusterInfo.from_dict(cluster_info_model_json)
        assert cluster_info_model != False
        # Rebuilding through the constructor must yield an equivalent instance.
        cluster_info_model2 = ClusterInfo(**ClusterInfo.from_dict(cluster_info_model_json).__dict__)
        assert cluster_info_model == cluster_info_model2
        # Converting back to a dict must be lossless.
        assert cluster_info_model.to_dict() == cluster_info_model_json
#-----------------------------------------------------------------------------
# Test Class for ClusterSearchResult
#-----------------------------------------------------------------------------
class TestClusterSearchResult():
    """Test serialization/deserialization for the ClusterSearchResult model."""

    def test_cluster_search_result_serialization(self):
        """Round-trip a ClusterSearchResult model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        cluster_info_model = {  # ClusterInfo
            'resource_group_id': 'testString',
            'resource_group_name': 'testString',
            'id': 'testString',
            'name': 'testString',
            'region': 'testString',
        }
        # Construct a json representation of a ClusterSearchResult model
        cluster_search_result_model_json = {
            'offset': 38,
            'limit': 38,
            'total_count': 38,
            'resource_count': 38,
            'first': 'testString',
            'last': 'testString',
            'prev': 'testString',
            'next': 'testString',
            'resources': [cluster_info_model],
        }
        # Construct a model instance of ClusterSearchResult by calling from_dict on the json representation
        cluster_search_result_model = ClusterSearchResult.from_dict(cluster_search_result_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert cluster_search_result_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        cluster_search_result_model_dict = ClusterSearchResult.from_dict(cluster_search_result_model_json).__dict__
        cluster_search_result_model2 = ClusterSearchResult(**cluster_search_result_model_dict)
        assert cluster_search_result_model == cluster_search_result_model2
        # Convert model instance back to dict and verify no loss of data
        cluster_search_result_model_json2 = cluster_search_result_model.to_dict()
        assert cluster_search_result_model_json2 == cluster_search_result_model_json
#-----------------------------------------------------------------------------
# Test Class for Configuration
#-----------------------------------------------------------------------------
class TestConfiguration():
    """Test serialization/deserialization for the Configuration model."""

    def test_configuration_serialization(self):
        """Round-trip a Configuration model through from_dict/to_dict without data loss."""
        # Construct a json representation of a Configuration model
        configuration_model_json = {
            'key': 'testString',
            'type': 'testString',
            'default_value': {'foo': 'bar'},
            'value_constraint': 'testString',
            'description': 'testString',
            'required': True,
            'options': [{'foo': 'bar'}],
            'hidden': True,
        }
        # Construct a model instance of Configuration by calling from_dict on the json representation
        configuration_model = Configuration.from_dict(configuration_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert configuration_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        configuration_model_dict = Configuration.from_dict(configuration_model_json).__dict__
        configuration_model2 = Configuration(**configuration_model_dict)
        assert configuration_model == configuration_model2
        # Convert model instance back to dict and verify no loss of data
        configuration_model_json2 = configuration_model.to_dict()
        assert configuration_model_json2 == configuration_model_json
#-----------------------------------------------------------------------------
# Test Class for DeployRequestBodySchematics
#-----------------------------------------------------------------------------
class TestDeployRequestBodySchematics():
    """Test serialization/deserialization for the DeployRequestBodySchematics model."""

    def test_deploy_request_body_schematics_serialization(self):
        """Round-trip a DeployRequestBodySchematics model through from_dict/to_dict without data loss."""
        # Construct a json representation of a DeployRequestBodySchematics model
        deploy_request_body_schematics_model_json = {
            'name': 'testString',
            'description': 'testString',
            'tags': ['testString'],
            'resource_group_id': 'testString',
        }
        # Construct a model instance of DeployRequestBodySchematics by calling from_dict on the json representation
        deploy_request_body_schematics_model = DeployRequestBodySchematics.from_dict(deploy_request_body_schematics_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert deploy_request_body_schematics_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        deploy_request_body_schematics_model_dict = DeployRequestBodySchematics.from_dict(deploy_request_body_schematics_model_json).__dict__
        deploy_request_body_schematics_model2 = DeployRequestBodySchematics(**deploy_request_body_schematics_model_dict)
        assert deploy_request_body_schematics_model == deploy_request_body_schematics_model2
        # Convert model instance back to dict and verify no loss of data
        deploy_request_body_schematics_model_json2 = deploy_request_body_schematics_model.to_dict()
        assert deploy_request_body_schematics_model_json2 == deploy_request_body_schematics_model_json
#-----------------------------------------------------------------------------
# Test Class for DeployRequirementsCheck
#-----------------------------------------------------------------------------
class TestDeployRequirementsCheck():
    """Test serialization/deserialization for the DeployRequirementsCheck model."""

    def test_deploy_requirements_check_serialization(self):
        """Round-trip a DeployRequirementsCheck model through from_dict/to_dict without data loss."""
        # Construct a json representation of a DeployRequirementsCheck model
        deploy_requirements_check_model_json = {
            'pre_install': {'foo': 'bar'},
            'install': {'foo': 'bar'},
        }
        # Construct a model instance of DeployRequirementsCheck by calling from_dict on the json representation
        deploy_requirements_check_model = DeployRequirementsCheck.from_dict(deploy_requirements_check_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert deploy_requirements_check_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        deploy_requirements_check_model_dict = DeployRequirementsCheck.from_dict(deploy_requirements_check_model_json).__dict__
        deploy_requirements_check_model2 = DeployRequirementsCheck(**deploy_requirements_check_model_dict)
        assert deploy_requirements_check_model == deploy_requirements_check_model2
        # Convert model instance back to dict and verify no loss of data
        deploy_requirements_check_model_json2 = deploy_requirements_check_model.to_dict()
        assert deploy_requirements_check_model_json2 == deploy_requirements_check_model_json
#-----------------------------------------------------------------------------
# Test Class for Deployment
#-----------------------------------------------------------------------------
class TestDeployment():
    """Test serialization/deserialization for the Deployment model."""

    def test_deployment_serialization(self):
        """Round-trip a Deployment model through from_dict/to_dict without data loss."""
        # Construct a json representation of a Deployment model
        deployment_model_json = {
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': {'foo': 'bar'},
            'tags': ['testString'],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
        }
        # Construct a model instance of Deployment by calling from_dict on the json representation
        deployment_model = Deployment.from_dict(deployment_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert deployment_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        deployment_model_dict = Deployment.from_dict(deployment_model_json).__dict__
        deployment_model2 = Deployment(**deployment_model_dict)
        assert deployment_model == deployment_model2
        # Convert model instance back to dict and verify no loss of data
        deployment_model_json2 = deployment_model.to_dict()
        assert deployment_model_json2 == deployment_model_json
#-----------------------------------------------------------------------------
# Test Class for Enterprise
#-----------------------------------------------------------------------------
class TestEnterprise():
    """Test serialization/deserialization for the Enterprise model."""

    def test_enterprise_serialization(self):
        """Round-trip an Enterprise model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        # NOTE: the previously constructed CategoryFilter dict was unused (the
        # 'category_filters' field is deliberately an empty dict), so it was removed.
        filter_terms_model = {  # FilterTerms
            'filter_terms': ['testString'],
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        filters_model = {  # Filters
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        account_group_model = {  # AccountGroup
            'id': 'testString',
            'account_filters': filters_model,
        }
        enterprise_account_groups_model = {  # EnterpriseAccountGroups
            'keys': account_group_model,
        }
        # Construct a json representation of a Enterprise model
        enterprise_model_json = {
            'id': 'testString',
            '_rev': 'testString',
            'account_filters': filters_model,
            'account_groups': enterprise_account_groups_model,
        }
        # Construct a model instance of Enterprise by calling from_dict on the json representation
        enterprise_model = Enterprise.from_dict(enterprise_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert enterprise_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        enterprise_model_dict = Enterprise.from_dict(enterprise_model_json).__dict__
        enterprise_model2 = Enterprise(**enterprise_model_dict)
        assert enterprise_model == enterprise_model2
        # Convert model instance back to dict and verify no loss of data
        enterprise_model_json2 = enterprise_model.to_dict()
        assert enterprise_model_json2 == enterprise_model_json
#-----------------------------------------------------------------------------
# Test Class for EnterpriseAccountGroups
#-----------------------------------------------------------------------------
class TestEnterpriseAccountGroups():
    """Test serialization/deserialization for the EnterpriseAccountGroups model."""

    def test_enterprise_account_groups_serialization(self):
        """Round-trip an EnterpriseAccountGroups model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        # NOTE: the previously constructed CategoryFilter dict was unused (the
        # 'category_filters' field is deliberately an empty dict), so it was removed.
        filter_terms_model = {  # FilterTerms
            'filter_terms': ['testString'],
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        filters_model = {  # Filters
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        account_group_model = {  # AccountGroup
            'id': 'testString',
            'account_filters': filters_model,
        }
        # Construct a json representation of a EnterpriseAccountGroups model
        enterprise_account_groups_model_json = {
            'keys': account_group_model,
        }
        # Construct a model instance of EnterpriseAccountGroups by calling from_dict on the json representation
        enterprise_account_groups_model = EnterpriseAccountGroups.from_dict(enterprise_account_groups_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert enterprise_account_groups_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        enterprise_account_groups_model_dict = EnterpriseAccountGroups.from_dict(enterprise_account_groups_model_json).__dict__
        enterprise_account_groups_model2 = EnterpriseAccountGroups(**enterprise_account_groups_model_dict)
        assert enterprise_account_groups_model == enterprise_account_groups_model2
        # Convert model instance back to dict and verify no loss of data
        enterprise_account_groups_model_json2 = enterprise_account_groups_model.to_dict()
        assert enterprise_account_groups_model_json2 == enterprise_account_groups_model_json
#-----------------------------------------------------------------------------
# Test Class for Feature
#-----------------------------------------------------------------------------
class TestFeature():
    """Test serialization/deserialization for the Feature model."""

    def test_feature_serialization(self):
        """Round-trip a Feature model through from_dict/to_dict without data loss."""
        # Construct a json representation of a Feature model
        feature_model_json = {
            'title': 'testString',
            'description': 'testString',
        }
        # Construct a model instance of Feature by calling from_dict on the json representation
        feature_model = Feature.from_dict(feature_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert feature_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        feature_model_dict = Feature.from_dict(feature_model_json).__dict__
        feature_model2 = Feature(**feature_model_dict)
        assert feature_model == feature_model2
        # Convert model instance back to dict and verify no loss of data
        feature_model_json2 = feature_model.to_dict()
        assert feature_model_json2 == feature_model_json
#-----------------------------------------------------------------------------
# Test Class for FilterTerms
#-----------------------------------------------------------------------------
class TestFilterTerms():
    """Test serialization/deserialization for the FilterTerms model."""

    def test_filter_terms_serialization(self):
        """Round-trip a FilterTerms model through from_dict/to_dict without data loss."""
        # Construct a json representation of a FilterTerms model
        filter_terms_model_json = {
            'filter_terms': ['testString'],
        }
        # Construct a model instance of FilterTerms by calling from_dict on the json representation
        filter_terms_model = FilterTerms.from_dict(filter_terms_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert filter_terms_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        filter_terms_model_dict = FilterTerms.from_dict(filter_terms_model_json).__dict__
        filter_terms_model2 = FilterTerms(**filter_terms_model_dict)
        assert filter_terms_model == filter_terms_model2
        # Convert model instance back to dict and verify no loss of data
        filter_terms_model_json2 = filter_terms_model.to_dict()
        assert filter_terms_model_json2 == filter_terms_model_json
#-----------------------------------------------------------------------------
# Test Class for Filters
#-----------------------------------------------------------------------------
class TestFilters():
    """Test serialization/deserialization for the Filters model."""

    def test_filters_serialization(self):
        """Round-trip a Filters model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        # NOTE: the previously constructed CategoryFilter dict was unused (the
        # 'category_filters' field is deliberately an empty dict), so it was removed.
        filter_terms_model = {  # FilterTerms
            'filter_terms': ['testString'],
        }
        id_filter_model = {  # IDFilter
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        # Construct a json representation of a Filters model
        filters_model_json = {
            'include_all': True,
            'category_filters': {},
            'id_filters': id_filter_model,
        }
        # Construct a model instance of Filters by calling from_dict on the json representation
        filters_model = Filters.from_dict(filters_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert filters_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        filters_model_dict = Filters.from_dict(filters_model_json).__dict__
        filters_model2 = Filters(**filters_model_dict)
        assert filters_model == filters_model2
        # Convert model instance back to dict and verify no loss of data
        filters_model_json2 = filters_model.to_dict()
        assert filters_model_json2 == filters_model_json
#-----------------------------------------------------------------------------
# Test Class for HelmChart
#-----------------------------------------------------------------------------
class TestHelmChart():
    """Test serialization/deserialization for the HelmChart model."""

    def test_helm_chart_serialization(self):
        """Round-trip a HelmChart model through from_dict/to_dict without data loss."""
        # Construct a json representation of a HelmChart model
        helm_chart_model_json = {
            'name': 'testString',
            'description': 'testString',
            'icon': 'testString',
            'version': 'testString',
            'appVersion': 'testString',
        }
        # Construct a model instance of HelmChart by calling from_dict on the json representation
        helm_chart_model = HelmChart.from_dict(helm_chart_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert helm_chart_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        helm_chart_model_dict = HelmChart.from_dict(helm_chart_model_json).__dict__
        helm_chart_model2 = HelmChart(**helm_chart_model_dict)
        assert helm_chart_model == helm_chart_model2
        # Convert model instance back to dict and verify no loss of data
        helm_chart_model_json2 = helm_chart_model.to_dict()
        assert helm_chart_model_json2 == helm_chart_model_json
#-----------------------------------------------------------------------------
# Test Class for HelmPackage
#-----------------------------------------------------------------------------
class TestHelmPackage():
    """Test serialization/deserialization for the HelmPackage model."""

    def test_helm_package_serialization(self):
        """Round-trip a HelmPackage model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        helm_chart_model = {  # HelmChart
            'name': 'testString',
            'description': 'testString',
            'icon': 'testString',
            'version': 'testString',
            'appVersion': 'testString',
        }
        helm_package_chart_model = {  # HelmPackageChart
            'Chart.yaml': helm_chart_model,
            'sha': {'foo': 'bar'},
            'README.md': 'testString',
            'values-metadata': {'foo': 'bar'},
            'license-metadata': {'foo': 'bar'},
        }
        # Construct a json representation of a HelmPackage model
        helm_package_model_json = {
            'chart': helm_package_chart_model,
        }
        # Construct a model instance of HelmPackage by calling from_dict on the json representation
        helm_package_model = HelmPackage.from_dict(helm_package_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert helm_package_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        helm_package_model_dict = HelmPackage.from_dict(helm_package_model_json).__dict__
        helm_package_model2 = HelmPackage(**helm_package_model_dict)
        assert helm_package_model == helm_package_model2
        # Convert model instance back to dict and verify no loss of data
        helm_package_model_json2 = helm_package_model.to_dict()
        assert helm_package_model_json2 == helm_package_model_json
#-----------------------------------------------------------------------------
# Test Class for HelmPackageChart
#-----------------------------------------------------------------------------
class TestHelmPackageChart():
    """Test serialization/deserialization for the HelmPackageChart model."""

    def test_helm_package_chart_serialization(self):
        """Round-trip a HelmPackageChart model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        helm_chart_model = {  # HelmChart
            'name': 'testString',
            'description': 'testString',
            'icon': 'testString',
            'version': 'testString',
            'appVersion': 'testString',
        }
        # Construct a json representation of a HelmPackageChart model
        helm_package_chart_model_json = {
            'Chart.yaml': helm_chart_model,
            'sha': {'foo': 'bar'},
            'README.md': 'testString',
            'values-metadata': {'foo': 'bar'},
            'license-metadata': {'foo': 'bar'},
        }
        # Construct a model instance of HelmPackageChart by calling from_dict on the json representation
        helm_package_chart_model = HelmPackageChart.from_dict(helm_package_chart_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert helm_package_chart_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        helm_package_chart_model_dict = HelmPackageChart.from_dict(helm_package_chart_model_json).__dict__
        helm_package_chart_model2 = HelmPackageChart(**helm_package_chart_model_dict)
        assert helm_package_chart_model == helm_package_chart_model2
        # Convert model instance back to dict and verify no loss of data
        helm_package_chart_model_json2 = helm_package_chart_model.to_dict()
        assert helm_package_chart_model_json2 == helm_package_chart_model_json
#-----------------------------------------------------------------------------
# Test Class for HelmRepoList
#-----------------------------------------------------------------------------
class TestHelmRepoList():
    """Test serialization/deserialization for the HelmRepoList model."""

    def test_helm_repo_list_serialization(self):
        """Round-trip a HelmRepoList model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        maintainers_model = {  # Maintainers
            'email': 'testString',
            'name': 'testString',
        }
        helm_repo_list_chart_model = {  # HelmRepoListChart
            'api_version': 'testString',
            'created': '2020-01-28T18:40:40.123456Z',
            'description': 'testString',
            'deprecated': True,
            'digest': 'testString',
            'home': 'testString',
            'icon': 'testString',
            'keywords': ['testString'],
            'maintainers': [maintainers_model],
            'name': 'testString',
            'tiller_version': 'testString',
            'urls': ['testString'],
            'sources': ['testString'],
            'version': 'testString',
            'appVersion': 'testString',
        }
        # Construct a json representation of a HelmRepoList model
        helm_repo_list_model_json = {
            'chart': helm_repo_list_chart_model,
        }
        # Construct a model instance of HelmRepoList by calling from_dict on the json representation
        helm_repo_list_model = HelmRepoList.from_dict(helm_repo_list_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert helm_repo_list_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        helm_repo_list_model_dict = HelmRepoList.from_dict(helm_repo_list_model_json).__dict__
        helm_repo_list_model2 = HelmRepoList(**helm_repo_list_model_dict)
        assert helm_repo_list_model == helm_repo_list_model2
        # Convert model instance back to dict and verify no loss of data
        helm_repo_list_model_json2 = helm_repo_list_model.to_dict()
        assert helm_repo_list_model_json2 == helm_repo_list_model_json
#-----------------------------------------------------------------------------
# Test Class for HelmRepoListChart
#-----------------------------------------------------------------------------
class TestHelmRepoListChart():
    """Test serialization/deserialization for the HelmRepoListChart model."""

    def test_helm_repo_list_chart_serialization(self):
        """Round-trip a HelmRepoListChart model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        maintainers_model = {  # Maintainers
            'email': 'testString',
            'name': 'testString',
        }
        # Construct a json representation of a HelmRepoListChart model
        helm_repo_list_chart_model_json = {
            'api_version': 'testString',
            'created': '2020-01-28T18:40:40.123456Z',
            'description': 'testString',
            'deprecated': True,
            'digest': 'testString',
            'home': 'testString',
            'icon': 'testString',
            'keywords': ['testString'],
            'maintainers': [maintainers_model],
            'name': 'testString',
            'tiller_version': 'testString',
            'urls': ['testString'],
            'sources': ['testString'],
            'version': 'testString',
            'appVersion': 'testString',
        }
        # Construct a model instance of HelmRepoListChart by calling from_dict on the json representation
        helm_repo_list_chart_model = HelmRepoListChart.from_dict(helm_repo_list_chart_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert helm_repo_list_chart_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        helm_repo_list_chart_model_dict = HelmRepoListChart.from_dict(helm_repo_list_chart_model_json).__dict__
        helm_repo_list_chart_model2 = HelmRepoListChart(**helm_repo_list_chart_model_dict)
        assert helm_repo_list_chart_model == helm_repo_list_chart_model2
        # Convert model instance back to dict and verify no loss of data
        helm_repo_list_chart_model_json2 = helm_repo_list_chart_model.to_dict()
        assert helm_repo_list_chart_model_json2 == helm_repo_list_chart_model_json
#-----------------------------------------------------------------------------
# Test Class for IDFilter
#-----------------------------------------------------------------------------
class TestIDFilter():
    """Test serialization/deserialization for the IDFilter model."""

    def test_id_filter_serialization(self):
        """Round-trip an IDFilter model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        filter_terms_model = {  # FilterTerms
            'filter_terms': ['testString'],
        }
        # Construct a json representation of a IDFilter model
        id_filter_model_json = {
            'include': filter_terms_model,
            'exclude': filter_terms_model,
        }
        # Construct a model instance of IDFilter by calling from_dict on the json representation
        id_filter_model = IDFilter.from_dict(id_filter_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert id_filter_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        id_filter_model_dict = IDFilter.from_dict(id_filter_model_json).__dict__
        id_filter_model2 = IDFilter(**id_filter_model_dict)
        assert id_filter_model == id_filter_model2
        # Convert model instance back to dict and verify no loss of data
        id_filter_model_json2 = id_filter_model.to_dict()
        assert id_filter_model_json2 == id_filter_model_json
#-----------------------------------------------------------------------------
# Test Class for Image
#-----------------------------------------------------------------------------
class TestImage():
    """Test serialization/deserialization for the Image model."""

    def test_image_serialization(self):
        """Round-trip an Image model through from_dict/to_dict without data loss."""
        # Construct a json representation of a Image model
        image_model_json = {
            'image': 'testString',
        }
        # Construct a model instance of Image by calling from_dict on the json representation
        image_model = Image.from_dict(image_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert image_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        image_model_dict = Image.from_dict(image_model_json).__dict__
        image_model2 = Image(**image_model_dict)
        assert image_model == image_model2
        # Convert model instance back to dict and verify no loss of data
        image_model_json2 = image_model.to_dict()
        assert image_model_json2 == image_model_json
#-----------------------------------------------------------------------------
# Test Class for ImageManifest
#-----------------------------------------------------------------------------
class TestImageManifest():
    """Test serialization/deserialization for the ImageManifest model."""

    def test_image_manifest_serialization(self):
        """Round-trip an ImageManifest model through from_dict/to_dict without data loss."""
        # Construct dict forms of any model objects needed in order to build this model.
        image_model = {  # Image
            'image': 'testString',
        }
        # Construct a json representation of a ImageManifest model
        image_manifest_model_json = {
            'description': 'testString',
            'images': [image_model],
        }
        # Construct a model instance of ImageManifest by calling from_dict on the json representation
        image_manifest_model = ImageManifest.from_dict(image_manifest_model_json)
        # PEP 8: check for presence with `is not None` rather than `!= False`
        assert image_manifest_model is not None
        # Construct a second instance via constructor kwargs and verify equivalence
        image_manifest_model_dict = ImageManifest.from_dict(image_manifest_model_json).__dict__
        image_manifest_model2 = ImageManifest(**image_manifest_model_dict)
        assert image_manifest_model == image_manifest_model2
        # Convert model instance back to dict and verify no loss of data
        image_manifest_model_json2 = image_manifest_model.to_dict()
        assert image_manifest_model_json2 == image_manifest_model_json
#-----------------------------------------------------------------------------
# Test Class for InstallStatus
#-----------------------------------------------------------------------------
class TestInstallStatus():
    """Serialization/deserialization round-trip tests for InstallStatus."""

    def test_install_status_serialization(self):
        # Dict forms of the nested child models.
        content_mgmt_dict = {  # InstallStatusContentMgmt
            'pods': [{'foo': 'bar'}],
            'errors': [{'foo': 'bar'}],
        }
        metadata_dict = {  # InstallStatusMetadata
            'cluster_id': 'testString',
            'region': 'testString',
            'namespace': 'testString',
            'workspace_id': 'testString',
            'workspace_name': 'testString',
        }
        release_dict = {  # InstallStatusRelease
            'deployments': [{'foo': 'bar'}],
            'replicasets': [{'foo': 'bar'}],
            'statefulsets': [{'foo': 'bar'}],
            'pods': [{'foo': 'bar'}],
            'errors': [{'foo': 'bar'}],
        }

        # JSON representation of an InstallStatus.
        status_json = {
            'metadata': metadata_dict,
            'release': release_dict,
            'content_mgmt': content_mgmt_dict,
        }

        # dict -> model instance.
        status = InstallStatus.from_dict(status_json)
        assert status != False

        # Second instance built via keyword args from the first's attributes.
        status2 = InstallStatus(**InstallStatus.from_dict(status_json).__dict__)
        assert status == status2

        # model -> dict must be lossless.
        assert status.to_dict() == status_json
#-----------------------------------------------------------------------------
# Test Class for InstallStatusContentMgmt
#-----------------------------------------------------------------------------
class TestInstallStatusContentMgmt():
    """Serialization/deserialization round-trip tests for InstallStatusContentMgmt."""

    def test_install_status_content_mgmt_serialization(self):
        # JSON representation of an InstallStatusContentMgmt.
        content_mgmt_json = {
            'pods': [{'foo': 'bar'}],
            'errors': [{'foo': 'bar'}],
        }

        # dict -> model instance.
        content_mgmt = InstallStatusContentMgmt.from_dict(content_mgmt_json)
        assert content_mgmt != False

        # Second instance built via keyword args from the first's attributes.
        content_mgmt2 = InstallStatusContentMgmt(
            **InstallStatusContentMgmt.from_dict(content_mgmt_json).__dict__)
        assert content_mgmt == content_mgmt2

        # model -> dict must be lossless.
        assert content_mgmt.to_dict() == content_mgmt_json
#-----------------------------------------------------------------------------
# Test Class for InstallStatusMetadata
#-----------------------------------------------------------------------------
class TestInstallStatusMetadata():
    """Serialization/deserialization round-trip tests for InstallStatusMetadata."""

    def test_install_status_metadata_serialization(self):
        # JSON representation of an InstallStatusMetadata.
        metadata_json = {
            'cluster_id': 'testString',
            'region': 'testString',
            'namespace': 'testString',
            'workspace_id': 'testString',
            'workspace_name': 'testString',
        }

        # dict -> model instance.
        metadata = InstallStatusMetadata.from_dict(metadata_json)
        assert metadata != False

        # Second instance built via keyword args from the first's attributes.
        metadata2 = InstallStatusMetadata(
            **InstallStatusMetadata.from_dict(metadata_json).__dict__)
        assert metadata == metadata2

        # model -> dict must be lossless.
        assert metadata.to_dict() == metadata_json
#-----------------------------------------------------------------------------
# Test Class for InstallStatusRelease
#-----------------------------------------------------------------------------
class TestInstallStatusRelease():
    """Serialization/deserialization round-trip tests for InstallStatusRelease."""

    def test_install_status_release_serialization(self):
        # JSON representation of an InstallStatusRelease.
        release_json = {
            'deployments': [{'foo': 'bar'}],
            'replicasets': [{'foo': 'bar'}],
            'statefulsets': [{'foo': 'bar'}],
            'pods': [{'foo': 'bar'}],
            'errors': [{'foo': 'bar'}],
        }

        # dict -> model instance.
        release = InstallStatusRelease.from_dict(release_json)
        assert release != False

        # Second instance built via keyword args from the first's attributes.
        release2 = InstallStatusRelease(
            **InstallStatusRelease.from_dict(release_json).__dict__)
        assert release == release2

        # model -> dict must be lossless.
        assert release.to_dict() == release_json
#-----------------------------------------------------------------------------
# Test Class for Kind
#-----------------------------------------------------------------------------
class TestKind():
    """Serialization/deserialization round-trip tests for Kind."""

    def test_kind_serialization(self):
        # Dict forms of every nested child model the Kind references.
        configuration_dict = {  # Configuration
            'key': 'testString',
            'type': 'testString',
            'default_value': {'foo': 'bar'},
            'value_constraint': 'testString',
            'description': 'testString',
            'required': True,
            'options': [{'foo': 'bar'}],
            'hidden': True,
        }
        deployment_dict = {  # Deployment
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': {'foo': 'bar'},
            'tags': ['testString'],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
        }
        feature_dict = {  # Feature
            'title': 'testString',
            'description': 'testString',
        }
        license_dict = {  # License
            'id': 'testString',
            'name': 'testString',
            'type': 'testString',
            'url': 'testString',
            'description': 'testString',
        }
        resource_dict = {  # Resource
            'type': 'mem',
            'value': {'foo': 'bar'},
        }
        script_dict = {  # Script
            'instructions': 'testString',
            'script': 'testString',
            'script_permission': 'testString',
            'delete_script': 'testString',
            'scope': 'testString',
        }
        state_dict = {  # State
            'current': 'testString',
            'current_entered': '2020-01-28T18:40:40.123456Z',
            'pending': 'testString',
            'pending_requested': '2020-01-28T18:40:40.123456Z',
            'previous': 'testString',
        }
        validation_dict = {  # Validation
            'validated': '2020-01-28T18:40:40.123456Z',
            'requested': '2020-01-28T18:40:40.123456Z',
            'state': 'testString',
            'last_operation': 'testString',
            'target': {'foo': 'bar'},
        }
        entitlement_dict = {  # VersionEntitlement
            'provider_name': 'testString',
            'provider_id': 'testString',
            'product_id': 'testString',
            'part_numbers': ['testString'],
            'image_repo_name': 'testString',
        }
        plan_dict = {  # Plan
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': {'foo': 'bar'},
            'tags': ['testString'],
            'additional_features': [feature_dict],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'deployments': [deployment_dict],
        }
        version_dict = {  # Version
            'id': 'testString',
            '_rev': 'testString',
            'crn': 'testString',
            'version': 'testString',
            'sha': 'testString',
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'offering_id': 'testString',
            'catalog_id': 'testString',
            'kind_id': 'testString',
            'tags': ['testString'],
            'repo_url': 'testString',
            'source_url': 'testString',
            'tgz_url': 'testString',
            'configuration': [configuration_dict],
            'metadata': {'foo': 'bar'},
            'validation': validation_dict,
            'required_resources': [resource_dict],
            'single_instance': True,
            'install': script_dict,
            'pre_install': [script_dict],
            'entitlement': entitlement_dict,
            'licenses': [license_dict],
            'image_manifest_url': 'testString',
            'deprecated': True,
            'package_version': 'testString',
            'state': state_dict,
            'version_locator': 'testString',
            'console_url': 'testString',
            'long_description': 'testString',
            'whitelisted_accounts': ['testString'],
        }

        # JSON representation of a Kind.
        kind_json = {
            'id': 'testString',
            'format_kind': 'testString',
            'target_kind': 'testString',
            'metadata': {'foo': 'bar'},
            'install_description': 'testString',
            'tags': ['testString'],
            'additional_features': [feature_dict],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'versions': [version_dict],
            'plans': [plan_dict],
        }

        # dict -> model instance.
        kind = Kind.from_dict(kind_json)
        assert kind != False

        # Second instance built via keyword args from the first's attributes.
        kind2 = Kind(**Kind.from_dict(kind_json).__dict__)
        assert kind == kind2

        # model -> dict must be lossless.
        assert kind.to_dict() == kind_json
#-----------------------------------------------------------------------------
# Test Class for License
#-----------------------------------------------------------------------------
class TestLicense():
    """Serialization/deserialization round-trip tests for License."""

    def test_license_serialization(self):
        # JSON representation of a License.
        license_json = {
            'id': 'testString',
            'name': 'testString',
            'type': 'testString',
            'url': 'testString',
            'description': 'testString',
        }

        # dict -> model instance.
        license_obj = License.from_dict(license_json)
        assert license_obj != False

        # Second instance built via keyword args from the first's attributes.
        license_obj2 = License(**License.from_dict(license_json).__dict__)
        assert license_obj == license_obj2

        # model -> dict must be lossless.
        assert license_obj.to_dict() == license_json
#-----------------------------------------------------------------------------
# Test Class for LicenseEntitlement
#-----------------------------------------------------------------------------
class TestLicenseEntitlement():
    """Serialization/deserialization round-trip tests for LicenseEntitlement."""

    def test_license_entitlement_serialization(self):
        # Dict forms of the nested child models.
        history_item_dict = {  # LicenseEntitlementHistoryItem
            'action': 'testString',
            'user': 'testString',
            'date': 'testString',
        }
        offering_ref_dict = {  # LicenseOfferingReference
            'id': 'testString',
            'name': 'testString',
            'label': 'testString',
            'offering_icon_url': 'testString',
            'account_id': 'testString',
            'catalog_id': 'testString',
        }

        # JSON representation of a LicenseEntitlement.
        entitlement_json = {
            'name': 'testString',
            'id': 'testString',
            'crn': 'testString',
            'url': 'testString',
            'offering_type': 'testString',
            'state': 'testString',
            'effective_from': 'testString',
            'effective_until': 'testString',
            'account_id': 'testString',
            'owner_id': 'testString',
            'version_id': 'testString',
            'license_offering_id': 'testString',
            'license_id': 'testString',
            'license_owner_id': 'testString',
            'license_type': 'testString',
            'license_provider_id': 'testString',
            'license_provider_url': 'testString',
            'license_product_id': 'testString',
            'namespace_repository': 'testString',
            'apikey': 'testString',
            'create_by': 'testString',
            'update_by': 'testString',
            'create_at': 'testString',
            'updated_at': 'testString',
            'history': [history_item_dict],
            'offering_list': [offering_ref_dict],
        }

        # dict -> model instance.
        entitlement = LicenseEntitlement.from_dict(entitlement_json)
        assert entitlement != False

        # Second instance built via keyword args from the first's attributes.
        entitlement2 = LicenseEntitlement(
            **LicenseEntitlement.from_dict(entitlement_json).__dict__)
        assert entitlement == entitlement2

        # model -> dict must be lossless.
        assert entitlement.to_dict() == entitlement_json
#-----------------------------------------------------------------------------
# Test Class for LicenseEntitlementHistoryItem
#-----------------------------------------------------------------------------
class TestLicenseEntitlementHistoryItem():
    """Serialization/deserialization round-trip tests for LicenseEntitlementHistoryItem."""

    def test_license_entitlement_history_item_serialization(self):
        # JSON representation of a LicenseEntitlementHistoryItem.
        history_item_json = {
            'action': 'testString',
            'user': 'testString',
            'date': 'testString',
        }

        # dict -> model instance.
        history_item = LicenseEntitlementHistoryItem.from_dict(history_item_json)
        assert history_item != False

        # Second instance built via keyword args from the first's attributes.
        history_item2 = LicenseEntitlementHistoryItem(
            **LicenseEntitlementHistoryItem.from_dict(history_item_json).__dict__)
        assert history_item == history_item2

        # model -> dict must be lossless.
        assert history_item.to_dict() == history_item_json
#-----------------------------------------------------------------------------
# Test Class for LicenseEntitlements
#-----------------------------------------------------------------------------
class TestLicenseEntitlements():
    """Serialization/deserialization round-trip tests for LicenseEntitlements."""

    def test_license_entitlements_serialization(self):
        # Dict forms of the nested child models.
        history_item_dict = {  # LicenseEntitlementHistoryItem
            'action': 'testString',
            'user': 'testString',
            'date': 'testString',
        }
        offering_ref_dict = {  # LicenseOfferingReference
            'id': 'testString',
            'name': 'testString',
            'label': 'testString',
            'offering_icon_url': 'testString',
            'account_id': 'testString',
            'catalog_id': 'testString',
        }
        entitlement_dict = {  # LicenseEntitlement
            'name': 'testString',
            'id': 'testString',
            'crn': 'testString',
            'url': 'testString',
            'offering_type': 'testString',
            'state': 'testString',
            'effective_from': 'testString',
            'effective_until': 'testString',
            'account_id': 'testString',
            'owner_id': 'testString',
            'version_id': 'testString',
            'license_offering_id': 'testString',
            'license_id': 'testString',
            'license_owner_id': 'testString',
            'license_type': 'testString',
            'license_provider_id': 'testString',
            'license_provider_url': 'testString',
            'license_product_id': 'testString',
            'namespace_repository': 'testString',
            'apikey': 'testString',
            'create_by': 'testString',
            'update_by': 'testString',
            'create_at': 'testString',
            'updated_at': 'testString',
            'history': [history_item_dict],
            'offering_list': [offering_ref_dict],
        }

        # JSON representation of a LicenseEntitlements collection.
        entitlements_json = {
            'total_results': 38,
            'total_pages': 38,
            'prev_url': 'testString',
            'next_url': 'testString',
            'resources': [entitlement_dict],
        }

        # dict -> model instance.
        entitlements = LicenseEntitlements.from_dict(entitlements_json)
        assert entitlements != False

        # Second instance built via keyword args from the first's attributes.
        entitlements2 = LicenseEntitlements(
            **LicenseEntitlements.from_dict(entitlements_json).__dict__)
        assert entitlements == entitlements2

        # model -> dict must be lossless.
        assert entitlements.to_dict() == entitlements_json
#-----------------------------------------------------------------------------
# Test Class for LicenseObject
#-----------------------------------------------------------------------------
class TestLicenseObject():
    """Serialization/deserialization round-trip tests for LicenseObject."""

    def test_license_object_serialization(self):
        # Dict form of the nested LicenseOfferingReference model.
        offering_ref_dict = {
            'id': 'testString',
            'name': 'testString',
            'label': 'testString',
            'offering_icon_url': 'testString',
            'account_id': 'testString',
            'catalog_id': 'testString',
        }

        # JSON representation of a LicenseObject.
        license_object_json = {
            'name': 'testString',
            'offering_type': 'testString',
            'seats_allowed': 'testString',
            'seats_used': 'testString',
            'owner_id': 'testString',
            'license_offering_id': 'testString',
            'license_id': 'testString',
            'license_owner_id': 'testString',
            'license_type': 'testString',
            'license_provider_id': 'testString',
            'license_product_id': 'testString',
            'license_provider_url': 'testString',
            'effective_from': 'testString',
            'effective_until': 'testString',
            'internal': True,
            'offering_list': [offering_ref_dict],
        }

        # dict -> model instance.
        license_object = LicenseObject.from_dict(license_object_json)
        assert license_object != False

        # Second instance built via keyword args from the first's attributes.
        license_object2 = LicenseObject(
            **LicenseObject.from_dict(license_object_json).__dict__)
        assert license_object == license_object2

        # model -> dict must be lossless.
        assert license_object.to_dict() == license_object_json
#-----------------------------------------------------------------------------
# Test Class for LicenseOfferingReference
#-----------------------------------------------------------------------------
class TestLicenseOfferingReference():
    """Serialization/deserialization round-trip tests for LicenseOfferingReference."""

    def test_license_offering_reference_serialization(self):
        # JSON representation of a LicenseOfferingReference.
        offering_ref_json = {
            'id': 'testString',
            'name': 'testString',
            'label': 'testString',
            'offering_icon_url': 'testString',
            'account_id': 'testString',
            'catalog_id': 'testString',
        }

        # dict -> model instance.
        offering_ref = LicenseOfferingReference.from_dict(offering_ref_json)
        assert offering_ref != False

        # Second instance built via keyword args from the first's attributes.
        offering_ref2 = LicenseOfferingReference(
            **LicenseOfferingReference.from_dict(offering_ref_json).__dict__)
        assert offering_ref == offering_ref2

        # model -> dict must be lossless.
        assert offering_ref.to_dict() == offering_ref_json
#-----------------------------------------------------------------------------
# Test Class for LicenseProvider
#-----------------------------------------------------------------------------
class TestLicenseProvider():
    """Serialization/deserialization round-trip tests for LicenseProvider."""

    def test_license_provider_serialization(self):
        # JSON representation of a LicenseProvider.
        provider_json = {
            'name': 'testString',
            'short_description': 'testString',
            'id': 'testString',
            'licence_type': 'testString',
            'offering_type': 'testString',
            'create_url': 'testString',
            'info_url': 'testString',
            'url': 'testString',
            'crn': 'testString',
            'state': 'testString',
        }

        # dict -> model instance.
        provider = LicenseProvider.from_dict(provider_json)
        assert provider != False

        # Second instance built via keyword args from the first's attributes.
        provider2 = LicenseProvider(**LicenseProvider.from_dict(provider_json).__dict__)
        assert provider == provider2

        # model -> dict must be lossless.
        assert provider.to_dict() == provider_json
#-----------------------------------------------------------------------------
# Test Class for LicenseProviders
#-----------------------------------------------------------------------------
class TestLicenseProviders():
    """Serialization/deserialization round-trip tests for LicenseProviders."""

    def test_license_providers_serialization(self):
        # Dict form of the nested LicenseProvider model.
        provider_dict = {
            'name': 'testString',
            'short_description': 'testString',
            'id': 'testString',
            'licence_type': 'testString',
            'offering_type': 'testString',
            'create_url': 'testString',
            'info_url': 'testString',
            'url': 'testString',
            'crn': 'testString',
            'state': 'testString',
        }

        # JSON representation of a LicenseProviders collection.
        providers_json = {
            'total_results': 38,
            'total_pages': 38,
            'prev_url': 'testString',
            'next_url': 'testString',
            'resources': [provider_dict],
        }

        # dict -> model instance.
        providers = LicenseProviders.from_dict(providers_json)
        assert providers != False

        # Second instance built via keyword args from the first's attributes.
        providers2 = LicenseProviders(
            **LicenseProviders.from_dict(providers_json).__dict__)
        assert providers == providers2

        # model -> dict must be lossless.
        assert providers.to_dict() == providers_json
#-----------------------------------------------------------------------------
# Test Class for Licenses
#-----------------------------------------------------------------------------
class TestLicenses():
    """Serialization tests for the Licenses model."""

    def test_licenses_serialization(self):
        """Round-trip a Licenses model through from_dict / to_dict."""

        # Dict forms of the nested models needed to build a Licenses model.
        license_offering_reference_model = {  # LicenseOfferingReference
            'id': 'testString',
            'name': 'testString',
            'label': 'testString',
            'offering_icon_url': 'testString',
            'account_id': 'testString',
            'catalog_id': 'testString',
        }
        license_object_model = {  # LicenseObject
            'name': 'testString',
            'offering_type': 'testString',
            'seats_allowed': 'testString',
            'seats_used': 'testString',
            'owner_id': 'testString',
            'license_offering_id': 'testString',
            'license_id': 'testString',
            'license_owner_id': 'testString',
            'license_type': 'testString',
            'license_provider_id': 'testString',
            'license_product_id': 'testString',
            'license_provider_url': 'testString',
            'effective_from': 'testString',
            'effective_until': 'testString',
            'internal': True,
            'offering_list': [license_offering_reference_model],
        }

        # JSON (dict) representation of a Licenses model.
        licenses_model_json = {
            'total_results': 38,
            'total_pages': 38,
            'prev_url': 'testString',
            'next_url': 'testString',
            'resources': [license_object_model],
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        licenses_model = Licenses.from_dict(licenses_model_json)
        assert licenses_model is not None

        # Re-construct via constructor kwargs and verify equality.
        licenses_model_dict = Licenses.from_dict(licenses_model_json).__dict__
        licenses_model2 = Licenses(**licenses_model_dict)
        assert licenses_model == licenses_model2

        # Serialize back to a dict and verify no loss of data.
        licenses_model_json2 = licenses_model.to_dict()
        assert licenses_model_json2 == licenses_model_json
#-----------------------------------------------------------------------------
# Test Class for Maintainers
#-----------------------------------------------------------------------------
class TestMaintainers():
    """Serialization tests for the Maintainers model."""

    def test_maintainers_serialization(self):
        """Round-trip a Maintainers model through from_dict / to_dict."""

        # JSON (dict) representation of a Maintainers model.
        maintainers_model_json = {
            'email': 'testString',
            'name': 'testString',
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        maintainers_model = Maintainers.from_dict(maintainers_model_json)
        assert maintainers_model is not None

        # Re-construct via constructor kwargs and verify equality.
        maintainers_model_dict = Maintainers.from_dict(maintainers_model_json).__dict__
        maintainers_model2 = Maintainers(**maintainers_model_dict)
        assert maintainers_model == maintainers_model2

        # Serialize back to a dict and verify no loss of data.
        maintainers_model_json2 = maintainers_model.to_dict()
        assert maintainers_model_json2 == maintainers_model_json
#-----------------------------------------------------------------------------
# Test Class for NamespaceSearchResult
#-----------------------------------------------------------------------------
class TestNamespaceSearchResult():
    """Serialization tests for the NamespaceSearchResult model."""

    def test_namespace_search_result_serialization(self):
        """Round-trip a NamespaceSearchResult model through from_dict / to_dict."""

        # JSON (dict) representation of a NamespaceSearchResult model.
        namespace_search_result_model_json = {
            'offset': 38,
            'limit': 38,
            'total_count': 38,
            'resource_count': 38,
            'first': 'testString',
            'last': 'testString',
            'prev': 'testString',
            'next': 'testString',
            'resources': ['testString'],
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        namespace_search_result_model = NamespaceSearchResult.from_dict(namespace_search_result_model_json)
        assert namespace_search_result_model is not None

        # Re-construct via constructor kwargs and verify equality.
        namespace_search_result_model_dict = NamespaceSearchResult.from_dict(namespace_search_result_model_json).__dict__
        namespace_search_result_model2 = NamespaceSearchResult(**namespace_search_result_model_dict)
        assert namespace_search_result_model == namespace_search_result_model2

        # Serialize back to a dict and verify no loss of data.
        namespace_search_result_model_json2 = namespace_search_result_model.to_dict()
        assert namespace_search_result_model_json2 == namespace_search_result_model_json
#-----------------------------------------------------------------------------
# Test Class for Offering
#-----------------------------------------------------------------------------
class TestOffering():
    """Serialization tests for the Offering model."""

    def test_offering_serialization(self):
        """Round-trip an Offering model through from_dict / to_dict."""

        # Dict forms of every nested model needed to build an Offering.
        configuration_model = {  # Configuration
            'key': 'testString',
            'type': 'testString',
            'default_value': { 'foo': 'bar' },
            'value_constraint': 'testString',
            'description': 'testString',
            'required': True,
            'options': [{ 'foo': 'bar' }],
            'hidden': True,
        }
        deployment_model = {  # Deployment
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': { 'foo': 'bar' },
            'tags': ['testString'],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
        }
        feature_model = {  # Feature
            'title': 'testString',
            'description': 'testString',
        }
        license_model = {  # License
            'id': 'testString',
            'name': 'testString',
            'type': 'testString',
            'url': 'testString',
            'description': 'testString',
        }
        resource_model = {  # Resource
            'type': 'mem',
            'value': { 'foo': 'bar' },
        }
        script_model = {  # Script
            'instructions': 'testString',
            'script': 'testString',
            'script_permission': 'testString',
            'delete_script': 'testString',
            'scope': 'testString',
        }
        state_model = {  # State
            'current': 'testString',
            'current_entered': '2020-01-28T18:40:40.123456Z',
            'pending': 'testString',
            'pending_requested': '2020-01-28T18:40:40.123456Z',
            'previous': 'testString',
        }
        validation_model = {  # Validation
            'validated': '2020-01-28T18:40:40.123456Z',
            'requested': '2020-01-28T18:40:40.123456Z',
            'state': 'testString',
            'last_operation': 'testString',
            'target': { 'foo': 'bar' },
        }
        version_entitlement_model = {  # VersionEntitlement
            'provider_name': 'testString',
            'provider_id': 'testString',
            'product_id': 'testString',
            'part_numbers': ['testString'],
            'image_repo_name': 'testString',
        }
        plan_model = {  # Plan
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': { 'foo': 'bar' },
            'tags': ['testString'],
            'additional_features': [feature_model],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'deployments': [deployment_model],
        }
        version_model = {  # Version
            'id': 'testString',
            '_rev': 'testString',
            'crn': 'testString',
            'version': 'testString',
            'sha': 'testString',
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'offering_id': 'testString',
            'catalog_id': 'testString',
            'kind_id': 'testString',
            'tags': ['testString'],
            'repo_url': 'testString',
            'source_url': 'testString',
            'tgz_url': 'testString',
            'configuration': [configuration_model],
            'metadata': { 'foo': 'bar' },
            'validation': validation_model,
            'required_resources': [resource_model],
            'single_instance': True,
            'install': script_model,
            'pre_install': [script_model],
            'entitlement': version_entitlement_model,
            'licenses': [license_model],
            'image_manifest_url': 'testString',
            'deprecated': True,
            'package_version': 'testString',
            'state': state_model,
            'version_locator': 'testString',
            'console_url': 'testString',
            'long_description': 'testString',
            'whitelisted_accounts': ['testString'],
        }
        kind_model = {  # Kind
            'id': 'testString',
            'format_kind': 'testString',
            'target_kind': 'testString',
            'metadata': { 'foo': 'bar' },
            'install_description': 'testString',
            'tags': ['testString'],
            'additional_features': [feature_model],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'versions': [version_model],
            'plans': [plan_model],
        }
        rating_model = {  # Rating
            'one_star_count': 38,
            'two_star_count': 38,
            'three_star_count': 38,
            'four_star_count': 38,
        }
        repo_info_model = {  # RepoInfo
            'token': 'testString',
            'type': 'testString',
        }

        # JSON (dict) representation of an Offering model.
        offering_model_json = {
            'id': 'testString',
            '_rev': 'testString',
            'url': 'testString',
            'crn': 'testString',
            'label': 'testString',
            'name': 'testString',
            'offering_icon_url': 'testString',
            'offering_docs_url': 'testString',
            'offering_support_url': 'testString',
            'tags': ['testString'],
            'rating': rating_model,
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'short_description': 'testString',
            'long_description': 'testString',
            'features': [feature_model],
            'kinds': [kind_model],
            'permit_request_ibm_public_publish': True,
            'ibm_publish_approved': True,
            'public_publish_approved': True,
            'public_original_crn': 'testString',
            'publish_public_crn': 'testString',
            'portal_approval_record': 'testString',
            'portal_ui_url': 'testString',
            'catalog_id': 'testString',
            'catalog_name': 'testString',
            'metadata': { 'foo': 'bar' },
            'disclaimer': 'testString',
            'hidden': True,
            'provider': 'testString',
            'repo_info': repo_info_model,
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        offering_model = Offering.from_dict(offering_model_json)
        assert offering_model is not None

        # Re-construct via constructor kwargs and verify equality.
        offering_model_dict = Offering.from_dict(offering_model_json).__dict__
        offering_model2 = Offering(**offering_model_dict)
        assert offering_model == offering_model2

        # Serialize back to a dict and verify no loss of data.
        offering_model_json2 = offering_model.to_dict()
        assert offering_model_json2 == offering_model_json
#-----------------------------------------------------------------------------
# Test Class for OfferingSearchResult
#-----------------------------------------------------------------------------
class TestOfferingSearchResult():
    """Serialization tests for the OfferingSearchResult model."""

    def test_offering_search_result_serialization(self):
        """Round-trip an OfferingSearchResult model through from_dict / to_dict."""

        # Dict forms of every nested model needed to build the result.
        configuration_model = {  # Configuration
            'key': 'testString',
            'type': 'testString',
            'default_value': { 'foo': 'bar' },
            'value_constraint': 'testString',
            'description': 'testString',
            'required': True,
            'options': [{ 'foo': 'bar' }],
            'hidden': True,
        }
        deployment_model = {  # Deployment
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': { 'foo': 'bar' },
            'tags': ['testString'],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
        }
        feature_model = {  # Feature
            'title': 'testString',
            'description': 'testString',
        }
        license_model = {  # License
            'id': 'testString',
            'name': 'testString',
            'type': 'testString',
            'url': 'testString',
            'description': 'testString',
        }
        resource_model = {  # Resource
            'type': 'mem',
            'value': { 'foo': 'bar' },
        }
        script_model = {  # Script
            'instructions': 'testString',
            'script': 'testString',
            'script_permission': 'testString',
            'delete_script': 'testString',
            'scope': 'testString',
        }
        state_model = {  # State
            'current': 'testString',
            'current_entered': '2020-01-28T18:40:40.123456Z',
            'pending': 'testString',
            'pending_requested': '2020-01-28T18:40:40.123456Z',
            'previous': 'testString',
        }
        validation_model = {  # Validation
            'validated': '2020-01-28T18:40:40.123456Z',
            'requested': '2020-01-28T18:40:40.123456Z',
            'state': 'testString',
            'last_operation': 'testString',
            'target': { 'foo': 'bar' },
        }
        version_entitlement_model = {  # VersionEntitlement
            'provider_name': 'testString',
            'provider_id': 'testString',
            'product_id': 'testString',
            'part_numbers': ['testString'],
            'image_repo_name': 'testString',
        }
        plan_model = {  # Plan
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': { 'foo': 'bar' },
            'tags': ['testString'],
            'additional_features': [feature_model],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'deployments': [deployment_model],
        }
        version_model = {  # Version
            'id': 'testString',
            '_rev': 'testString',
            'crn': 'testString',
            'version': 'testString',
            'sha': 'testString',
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'offering_id': 'testString',
            'catalog_id': 'testString',
            'kind_id': 'testString',
            'tags': ['testString'],
            'repo_url': 'testString',
            'source_url': 'testString',
            'tgz_url': 'testString',
            'configuration': [configuration_model],
            'metadata': { 'foo': 'bar' },
            'validation': validation_model,
            'required_resources': [resource_model],
            'single_instance': True,
            'install': script_model,
            'pre_install': [script_model],
            'entitlement': version_entitlement_model,
            'licenses': [license_model],
            'image_manifest_url': 'testString',
            'deprecated': True,
            'package_version': 'testString',
            'state': state_model,
            'version_locator': 'testString',
            'console_url': 'testString',
            'long_description': 'testString',
            'whitelisted_accounts': ['testString'],
        }
        kind_model = {  # Kind
            'id': 'testString',
            'format_kind': 'testString',
            'target_kind': 'testString',
            'metadata': { 'foo': 'bar' },
            'install_description': 'testString',
            'tags': ['testString'],
            'additional_features': [feature_model],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'versions': [version_model],
            'plans': [plan_model],
        }
        rating_model = {  # Rating
            'one_star_count': 38,
            'two_star_count': 38,
            'three_star_count': 38,
            'four_star_count': 38,
        }
        repo_info_model = {  # RepoInfo
            'token': 'testString',
            'type': 'testString',
        }
        offering_model = {  # Offering
            'id': 'testString',
            '_rev': 'testString',
            'url': 'testString',
            'crn': 'testString',
            'label': 'testString',
            'name': 'testString',
            'offering_icon_url': 'testString',
            'offering_docs_url': 'testString',
            'offering_support_url': 'testString',
            'tags': ['testString'],
            'rating': rating_model,
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'short_description': 'testString',
            'long_description': 'testString',
            'features': [feature_model],
            'kinds': [kind_model],
            'permit_request_ibm_public_publish': True,
            'ibm_publish_approved': True,
            'public_publish_approved': True,
            'public_original_crn': 'testString',
            'publish_public_crn': 'testString',
            'portal_approval_record': 'testString',
            'portal_ui_url': 'testString',
            'catalog_id': 'testString',
            'catalog_name': 'testString',
            'metadata': { 'foo': 'bar' },
            'disclaimer': 'testString',
            'hidden': True,
            'provider': 'testString',
            'repo_info': repo_info_model,
        }

        # JSON (dict) representation of an OfferingSearchResult model.
        offering_search_result_model_json = {
            'offset': 38,
            'limit': 38,
            'total_count': 38,
            'resource_count': 38,
            'first': 'testString',
            'last': 'testString',
            'prev': 'testString',
            'next': 'testString',
            'resources': [offering_model],
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        offering_search_result_model = OfferingSearchResult.from_dict(offering_search_result_model_json)
        assert offering_search_result_model is not None

        # Re-construct via constructor kwargs and verify equality.
        offering_search_result_model_dict = OfferingSearchResult.from_dict(offering_search_result_model_json).__dict__
        offering_search_result_model2 = OfferingSearchResult(**offering_search_result_model_dict)
        assert offering_search_result_model == offering_search_result_model2

        # Serialize back to a dict and verify no loss of data.
        offering_search_result_model_json2 = offering_search_result_model.to_dict()
        assert offering_search_result_model_json2 == offering_search_result_model_json
#-----------------------------------------------------------------------------
# Test Class for OperatorDeployResult
#-----------------------------------------------------------------------------
class TestOperatorDeployResult():
    """Serialization tests for the OperatorDeployResult model."""

    def test_operator_deploy_result_serialization(self):
        """Round-trip an OperatorDeployResult model through from_dict / to_dict."""

        # JSON (dict) representation of an OperatorDeployResult model.
        operator_deploy_result_model_json = {
            'phase': 'testString',
            'message': 'testString',
            'link': 'testString',
            'name': 'testString',
            'version': 'testString',
            'namespace': 'testString',
            'package_name': 'testString',
            'catalog_id': 'testString',
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        operator_deploy_result_model = OperatorDeployResult.from_dict(operator_deploy_result_model_json)
        assert operator_deploy_result_model is not None

        # Re-construct via constructor kwargs and verify equality.
        operator_deploy_result_model_dict = OperatorDeployResult.from_dict(operator_deploy_result_model_json).__dict__
        operator_deploy_result_model2 = OperatorDeployResult(**operator_deploy_result_model_dict)
        assert operator_deploy_result_model == operator_deploy_result_model2

        # Serialize back to a dict and verify no loss of data.
        operator_deploy_result_model_json2 = operator_deploy_result_model.to_dict()
        assert operator_deploy_result_model_json2 == operator_deploy_result_model_json
#-----------------------------------------------------------------------------
# Test Class for Plan
#-----------------------------------------------------------------------------
class TestPlan():
    """Serialization tests for the Plan model."""

    def test_plan_serialization(self):
        """Round-trip a Plan model through from_dict / to_dict."""

        # Dict forms of the nested models needed to build a Plan.
        deployment_model = {  # Deployment
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': { 'foo': 'bar' },
            'tags': ['testString'],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
        }
        feature_model = {  # Feature
            'title': 'testString',
            'description': 'testString',
        }

        # JSON (dict) representation of a Plan model.
        plan_model_json = {
            'id': 'testString',
            'label': 'testString',
            'name': 'testString',
            'short_description': 'testString',
            'long_description': 'testString',
            'metadata': { 'foo': 'bar' },
            'tags': ['testString'],
            'additional_features': [feature_model],
            'created': '2020-01-28T18:40:40.123456Z',
            'updated': '2020-01-28T18:40:40.123456Z',
            'deployments': [deployment_model],
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        plan_model = Plan.from_dict(plan_model_json)
        assert plan_model is not None

        # Re-construct via constructor kwargs and verify equality.
        plan_model_dict = Plan.from_dict(plan_model_json).__dict__
        plan_model2 = Plan(**plan_model_dict)
        assert plan_model == plan_model2

        # Serialize back to a dict and verify no loss of data.
        plan_model_json2 = plan_model.to_dict()
        assert plan_model_json2 == plan_model_json
#-----------------------------------------------------------------------------
# Test Class for Rating
#-----------------------------------------------------------------------------
class TestRating():
    """Serialization tests for the Rating model."""

    def test_rating_serialization(self):
        """Round-trip a Rating model through from_dict / to_dict."""

        # JSON (dict) representation of a Rating model.
        rating_model_json = {
            'one_star_count': 38,
            'two_star_count': 38,
            'three_star_count': 38,
            'four_star_count': 38,
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        rating_model = Rating.from_dict(rating_model_json)
        assert rating_model is not None

        # Re-construct via constructor kwargs and verify equality.
        rating_model_dict = Rating.from_dict(rating_model_json).__dict__
        rating_model2 = Rating(**rating_model_dict)
        assert rating_model == rating_model2

        # Serialize back to a dict and verify no loss of data.
        rating_model_json2 = rating_model.to_dict()
        assert rating_model_json2 == rating_model_json
#-----------------------------------------------------------------------------
# Test Class for RepoInfo
#-----------------------------------------------------------------------------
class TestRepoInfo():
    """Serialization tests for the RepoInfo model."""

    def test_repo_info_serialization(self):
        """Round-trip a RepoInfo model through from_dict / to_dict."""

        # JSON (dict) representation of a RepoInfo model.
        repo_info_model_json = {
            'token': 'testString',
            'type': 'testString',
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        repo_info_model = RepoInfo.from_dict(repo_info_model_json)
        assert repo_info_model is not None

        # Re-construct via constructor kwargs and verify equality.
        repo_info_model_dict = RepoInfo.from_dict(repo_info_model_json).__dict__
        repo_info_model2 = RepoInfo(**repo_info_model_dict)
        assert repo_info_model == repo_info_model2

        # Serialize back to a dict and verify no loss of data.
        repo_info_model_json2 = repo_info_model.to_dict()
        assert repo_info_model_json2 == repo_info_model_json
#-----------------------------------------------------------------------------
# Test Class for Resource
#-----------------------------------------------------------------------------
class TestResource():
    """Serialization tests for the Resource model."""

    def test_resource_serialization(self):
        """Round-trip a Resource model through from_dict / to_dict."""

        # JSON (dict) representation of a Resource model.
        resource_model_json = {
            'type': 'mem',
            'value': { 'foo': 'bar' },
        }

        # Deserialize. Checking against None is stricter than the generated
        # `!= False` comparison, which would also pass for a None result.
        resource_model = Resource.from_dict(resource_model_json)
        assert resource_model is not None

        # Re-construct via constructor kwargs and verify equality.
        resource_model_dict = Resource.from_dict(resource_model_json).__dict__
        resource_model2 = Resource(**resource_model_dict)
        assert resource_model == resource_model2

        # Serialize back to a dict and verify no loss of data.
        resource_model_json2 = resource_model.to_dict()
        assert resource_model_json2 == resource_model_json
#-----------------------------------------------------------------------------
# Test Class for ResourceGroup
#-----------------------------------------------------------------------------
class TestResourceGroup():
    """Tests serialization/deserialization for the ResourceGroup model."""

    def test_resource_group_serialization(self):
        """Round-trip a ResourceGroup model through its dict conversions."""
        # JSON-style dict representation of a ResourceGroup.
        source_json = {
            'id': 'testString',
            'name': 'testString',
            'crn': 'testString',
            'account_id': 'testString',
            'state': 'testString',
            'default': True,
        }
        # from_dict must yield a truthy model instance.
        model = ResourceGroup.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = ResourceGroup(**ResourceGroup.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for ResourceGroups
#-----------------------------------------------------------------------------
class TestResourceGroups():
    """Tests serialization/deserialization for the ResourceGroups model."""

    def test_resource_groups_serialization(self):
        """Round-trip a ResourceGroups model through its dict conversions."""
        # Dict form of the nested ResourceGroup model.
        group_dict = {
            'id': 'testString',
            'name': 'testString',
            'crn': 'testString',
            'account_id': 'testString',
            'state': 'testString',
            'default': True,
        }
        # JSON-style dict representation of a ResourceGroups page.
        source_json = {
            'offset': 38,
            'limit': 38,
            'total_count': 38,
            'resource_count': 38,
            'first': 'testString',
            'last': 'testString',
            'prev': 'testString',
            'next': 'testString',
            'resources': [group_dict],
        }
        # from_dict must yield a truthy model instance.
        model = ResourceGroups.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = ResourceGroups(**ResourceGroups.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SchematicsWorkspace
#-----------------------------------------------------------------------------
class TestSchematicsWorkspace():
    """Tests serialization/deserialization for the SchematicsWorkspace model."""

    def test_schematics_workspace_serialization(self):
        """Round-trip a SchematicsWorkspace model through its dict conversions."""
        # Dict forms of the nested models referenced by SchematicsWorkspace.
        catalog_ref_dict = {
            'item_id': 'testString',
            'item_name': 'testString',
            'item_url': 'testString',
        }
        runtime_data_dict = {
            'id': 'testString',
            'engine_name': 'testString',
            'engine_version': 'testString',
            'state_store_url': 'testString',
            'log_store_url': 'testString',
        }
        template_repo_dict = {
            'repo_url': 'testString',
            'chart_name': 'testString',
            'script_name': 'testString',
            'uninstall_script_name': 'testString',
            'folder_name': 'testString',
            'repo_sha_value': 'testString',
        }
        workspace_status_dict = {
            'frozen': True,
            'locked': True,
        }
        # JSON-style dict representation of a SchematicsWorkspace.
        source_json = {
            'id': 'testString',
            'name': 'testString',
            'type': ['testString'],
            'description': 'testString',
            'tags': ['testString'],
            'created_at': '2020-01-28T18:40:40.123456Z',
            'created_by': 'testString',
            'status': 'testString',
            'workspace_status': workspace_status_dict,
            'template_ref': 'testString',
            'template_repo': template_repo_dict,
            'template_data': [{'foo': 'bar'}],
            'runtime_data': runtime_data_dict,
            'shared_data': {'foo': 'bar'},
            'catalog_ref': catalog_ref_dict,
        }
        # from_dict must yield a truthy model instance.
        model = SchematicsWorkspace.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SchematicsWorkspace(**SchematicsWorkspace.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SchematicsWorkspaceCatalogRef
#-----------------------------------------------------------------------------
class TestSchematicsWorkspaceCatalogRef():
    """Tests serialization/deserialization for the SchematicsWorkspaceCatalogRef model."""

    def test_schematics_workspace_catalog_ref_serialization(self):
        """Round-trip a SchematicsWorkspaceCatalogRef model through its dict conversions."""
        # JSON-style dict representation of a SchematicsWorkspaceCatalogRef.
        source_json = {
            'item_id': 'testString',
            'item_name': 'testString',
            'item_url': 'testString',
        }
        # from_dict must yield a truthy model instance.
        model = SchematicsWorkspaceCatalogRef.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SchematicsWorkspaceCatalogRef(**SchematicsWorkspaceCatalogRef.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SchematicsWorkspaceRuntimeData
#-----------------------------------------------------------------------------
class TestSchematicsWorkspaceRuntimeData():
    """Tests serialization/deserialization for the SchematicsWorkspaceRuntimeData model."""

    def test_schematics_workspace_runtime_data_serialization(self):
        """Round-trip a SchematicsWorkspaceRuntimeData model through its dict conversions."""
        # JSON-style dict representation of a SchematicsWorkspaceRuntimeData.
        source_json = {
            'id': 'testString',
            'engine_name': 'testString',
            'engine_version': 'testString',
            'state_store_url': 'testString',
            'log_store_url': 'testString',
        }
        # from_dict must yield a truthy model instance.
        model = SchematicsWorkspaceRuntimeData.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SchematicsWorkspaceRuntimeData(**SchematicsWorkspaceRuntimeData.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SchematicsWorkspaceSearchResult
#-----------------------------------------------------------------------------
class TestSchematicsWorkspaceSearchResult():
    """Tests serialization/deserialization for the SchematicsWorkspaceSearchResult model."""

    def test_schematics_workspace_search_result_serialization(self):
        """Round-trip a SchematicsWorkspaceSearchResult model through its dict conversions."""
        # Dict forms of the nested models referenced by SchematicsWorkspace.
        catalog_ref_dict = {
            'item_id': 'testString',
            'item_name': 'testString',
            'item_url': 'testString',
        }
        runtime_data_dict = {
            'id': 'testString',
            'engine_name': 'testString',
            'engine_version': 'testString',
            'state_store_url': 'testString',
            'log_store_url': 'testString',
        }
        template_repo_dict = {
            'repo_url': 'testString',
            'chart_name': 'testString',
            'script_name': 'testString',
            'uninstall_script_name': 'testString',
            'folder_name': 'testString',
            'repo_sha_value': 'testString',
        }
        workspace_status_dict = {
            'frozen': True,
            'locked': True,
        }
        # Dict form of a single SchematicsWorkspace resource.
        workspace_dict = {
            'id': 'testString',
            'name': 'testString',
            'type': ['testString'],
            'description': 'testString',
            'tags': ['testString'],
            'created_at': '2020-01-28T18:40:40.123456Z',
            'created_by': 'testString',
            'status': 'testString',
            'workspace_status': workspace_status_dict,
            'template_ref': 'testString',
            'template_repo': template_repo_dict,
            'template_data': [{'foo': 'bar'}],
            'runtime_data': runtime_data_dict,
            'shared_data': {'foo': 'bar'},
            'catalog_ref': catalog_ref_dict,
        }
        # JSON-style dict representation of a SchematicsWorkspaceSearchResult page.
        source_json = {
            'offset': 38,
            'limit': 38,
            'total_count': 38,
            'resource_count': 38,
            'first': 'testString',
            'last': 'testString',
            'prev': 'testString',
            'next': 'testString',
            'resources': [workspace_dict],
        }
        # from_dict must yield a truthy model instance.
        model = SchematicsWorkspaceSearchResult.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SchematicsWorkspaceSearchResult(**SchematicsWorkspaceSearchResult.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SchematicsWorkspaceTemplateRepo
#-----------------------------------------------------------------------------
class TestSchematicsWorkspaceTemplateRepo():
    """Tests serialization/deserialization for the SchematicsWorkspaceTemplateRepo model."""

    def test_schematics_workspace_template_repo_serialization(self):
        """Round-trip a SchematicsWorkspaceTemplateRepo model through its dict conversions."""
        # JSON-style dict representation of a SchematicsWorkspaceTemplateRepo.
        source_json = {
            'repo_url': 'testString',
            'chart_name': 'testString',
            'script_name': 'testString',
            'uninstall_script_name': 'testString',
            'folder_name': 'testString',
            'repo_sha_value': 'testString',
        }
        # from_dict must yield a truthy model instance.
        model = SchematicsWorkspaceTemplateRepo.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SchematicsWorkspaceTemplateRepo(**SchematicsWorkspaceTemplateRepo.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SchematicsWorkspaceWorkspaceStatus
#-----------------------------------------------------------------------------
class TestSchematicsWorkspaceWorkspaceStatus():
    """Tests serialization/deserialization for the SchematicsWorkspaceWorkspaceStatus model."""

    def test_schematics_workspace_workspace_status_serialization(self):
        """Round-trip a SchematicsWorkspaceWorkspaceStatus model through its dict conversions."""
        # JSON-style dict representation of a SchematicsWorkspaceWorkspaceStatus.
        source_json = {
            'frozen': True,
            'locked': True,
        }
        # from_dict must yield a truthy model instance.
        model = SchematicsWorkspaceWorkspaceStatus.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SchematicsWorkspaceWorkspaceStatus(**SchematicsWorkspaceWorkspaceStatus.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for Script
#-----------------------------------------------------------------------------
class TestScript():
    """Tests serialization/deserialization for the Script model."""

    def test_script_serialization(self):
        """Round-trip a Script model through its dict conversions."""
        # JSON-style dict representation of a Script.
        source_json = {
            'instructions': 'testString',
            'script': 'testString',
            'script_permission': 'testString',
            'delete_script': 'testString',
            'scope': 'testString',
        }
        # from_dict must yield a truthy model instance.
        model = Script.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = Script(**Script.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for State
#-----------------------------------------------------------------------------
class TestState():
    """Tests serialization/deserialization for the State model."""

    def test_state_serialization(self):
        """Round-trip a State model through its dict conversions."""
        # JSON-style dict representation of a State.
        source_json = {
            'current': 'testString',
            'current_entered': '2020-01-28T18:40:40.123456Z',
            'pending': 'testString',
            'pending_requested': '2020-01-28T18:40:40.123456Z',
            'previous': 'testString',
        }
        # from_dict must yield a truthy model instance.
        model = State.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = State(**State.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SyndicationAuthorization
#-----------------------------------------------------------------------------
class TestSyndicationAuthorization():
    """Tests serialization/deserialization for the SyndicationAuthorization model."""

    def test_syndication_authorization_serialization(self):
        """Round-trip a SyndicationAuthorization model through its dict conversions."""
        # JSON-style dict representation of a SyndicationAuthorization.
        source_json = {
            'token': 'testString',
            'last_run': '2020-01-28T18:40:40.123456Z',
        }
        # from_dict must yield a truthy model instance.
        model = SyndicationAuthorization.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SyndicationAuthorization(**SyndicationAuthorization.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SyndicationCluster
#-----------------------------------------------------------------------------
class TestSyndicationCluster():
    """Tests serialization/deserialization for the SyndicationCluster model."""

    def test_syndication_cluster_serialization(self):
        """Round-trip a SyndicationCluster model through its dict conversions."""
        # JSON-style dict representation of a SyndicationCluster.
        source_json = {
            'region': 'testString',
            'id': 'testString',
            'name': 'testString',
            'resource_group_name': 'testString',
            'type': 'testString',
            'namespaces': ['testString'],
            'all_namespaces': True,
        }
        # from_dict must yield a truthy model instance.
        model = SyndicationCluster.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SyndicationCluster(**SyndicationCluster.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SyndicationHistory
#-----------------------------------------------------------------------------
class TestSyndicationHistory():
    """Tests serialization/deserialization for the SyndicationHistory model."""

    def test_syndication_history_serialization(self):
        """Round-trip a SyndicationHistory model through its dict conversions."""
        # Dict form of the nested SyndicationCluster model.
        cluster_dict = {
            'region': 'testString',
            'id': 'testString',
            'name': 'testString',
            'resource_group_name': 'testString',
            'type': 'testString',
            'namespaces': ['testString'],
            'all_namespaces': True,
        }
        # JSON-style dict representation of a SyndicationHistory.
        source_json = {
            'namespaces': ['testString'],
            'clusters': [cluster_dict],
            'last_run': '2020-01-28T18:40:40.123456Z',
        }
        # from_dict must yield a truthy model instance.
        model = SyndicationHistory.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SyndicationHistory(**SyndicationHistory.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for SyndicationResource
#-----------------------------------------------------------------------------
class TestSyndicationResource():
    """Tests serialization/deserialization for the SyndicationResource model."""

    def test_syndication_resource_serialization(self):
        """Round-trip a SyndicationResource model through its dict conversions."""
        # Dict forms of the nested models referenced by SyndicationResource.
        cluster_dict = {
            'region': 'testString',
            'id': 'testString',
            'name': 'testString',
            'resource_group_name': 'testString',
            'type': 'testString',
            'namespaces': ['testString'],
            'all_namespaces': True,
        }
        authorization_dict = {
            'token': 'testString',
            'last_run': '2020-01-28T18:40:40.123456Z',
        }
        history_dict = {
            'namespaces': ['testString'],
            'clusters': [cluster_dict],
            'last_run': '2020-01-28T18:40:40.123456Z',
        }
        # JSON-style dict representation of a SyndicationResource.
        source_json = {
            'remove_related_components': True,
            'clusters': [cluster_dict],
            'history': history_dict,
            'authorization': authorization_dict,
        }
        # from_dict must yield a truthy model instance.
        model = SyndicationResource.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = SyndicationResource(**SyndicationResource.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for Validation
#-----------------------------------------------------------------------------
class TestValidation():
    """Tests serialization/deserialization for the Validation model."""

    def test_validation_serialization(self):
        """Round-trip a Validation model through its dict conversions."""
        # JSON-style dict representation of a Validation.
        source_json = {
            'validated': '2020-01-28T18:40:40.123456Z',
            'requested': '2020-01-28T18:40:40.123456Z',
            'state': 'testString',
            'last_operation': 'testString',
            'target': {'foo': 'bar'},
        }
        # from_dict must yield a truthy model instance.
        model = Validation.from_dict(source_json)
        assert model != False
        # Rebuilding from the instance attribute dict must produce an equal model.
        rebuilt = Validation(**Validation.from_dict(source_json).__dict__)
        assert model == rebuilt
        # Converting back to dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for Version
#-----------------------------------------------------------------------------
class TestVersion():
    """Tests for the Version model."""

    def test_version_serialization(self):
        """Round-trip a Version model through dict conversion."""
        timestamp = '2020-01-28T18:40:40.123456Z'

        # Dict forms of the nested models Version embeds.
        configuration_model = {
            'key': 'testString',
            'type': 'testString',
            'default_value': {'foo': 'bar'},
            'value_constraint': 'testString',
            'description': 'testString',
            'required': True,
            'options': [{'foo': 'bar'}],
            'hidden': True,
        }
        license_model = {
            'id': 'testString',
            'name': 'testString',
            'type': 'testString',
            'url': 'testString',
            'description': 'testString',
        }
        resource_model = {'type': 'mem', 'value': {'foo': 'bar'}}
        script_model = {
            'instructions': 'testString',
            'script': 'testString',
            'script_permission': 'testString',
            'delete_script': 'testString',
            'scope': 'testString',
        }
        state_model = {
            'current': 'testString',
            'current_entered': timestamp,
            'pending': 'testString',
            'pending_requested': timestamp,
            'previous': 'testString',
        }
        validation_model = {
            'validated': timestamp,
            'requested': timestamp,
            'state': 'testString',
            'last_operation': 'testString',
            'target': {'foo': 'bar'},
        }
        version_entitlement_model = {
            'provider_name': 'testString',
            'provider_id': 'testString',
            'product_id': 'testString',
            'part_numbers': ['testString'],
            'image_repo_name': 'testString',
        }

        # Dict form of the Version model itself, as one literal.
        version_model_json = {
            'id': 'testString',
            '_rev': 'testString',
            'crn': 'testString',
            'version': 'testString',
            'sha': 'testString',
            'created': timestamp,
            'updated': timestamp,
            'offering_id': 'testString',
            'catalog_id': 'testString',
            'kind_id': 'testString',
            'tags': ['testString'],
            'repo_url': 'testString',
            'source_url': 'testString',
            'tgz_url': 'testString',
            'configuration': [configuration_model],
            'metadata': {'foo': 'bar'},
            'validation': validation_model,
            'required_resources': [resource_model],
            'single_instance': True,
            'install': script_model,
            'pre_install': [script_model],
            'entitlement': version_entitlement_model,
            'licenses': [license_model],
            'image_manifest_url': 'testString',
            'deprecated': True,
            'package_version': 'testString',
            'state': state_model,
            'version_locator': 'testString',
            'console_url': 'testString',
            'long_description': 'testString',
            'whitelisted_accounts': ['testString'],
        }

        # Deserialize and make sure a real instance came back.
        version_model = Version.from_dict(version_model_json)
        assert version_model != False

        # Rebuild a second instance from the first one's attribute dict
        # and verify the two compare equal.
        version_model2 = Version(**Version.from_dict(version_model_json).__dict__)
        assert version_model == version_model2

        # Serializing back must reproduce the original dict exactly.
        assert version_model.to_dict() == version_model_json
#-----------------------------------------------------------------------------
# Test Class for VersionEntitlement
#-----------------------------------------------------------------------------
class TestVersionEntitlement():
    """Tests for the VersionEntitlement model."""

    def test_version_entitlement_serialization(self):
        """Round-trip a VersionEntitlement model through dict conversion."""
        # Dict form of the model, built as a single literal.
        source_json = {
            'provider_name': 'testString',
            'provider_id': 'testString',
            'product_id': 'testString',
            'part_numbers': ['testString'],
            'image_repo_name': 'testString',
        }

        # Deserialize and make sure a real instance came back.
        model = VersionEntitlement.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict
        # and verify the two compare equal.
        twin = VersionEntitlement(**VersionEntitlement.from_dict(source_json).__dict__)
        assert model == twin

        # Serializing back must reproduce the original dict exactly.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for VersionUpdateDescriptor
#-----------------------------------------------------------------------------
class TestVersionUpdateDescriptor():
    """Tests for the VersionUpdateDescriptor model."""

    def test_version_update_descriptor_serialization(self):
        """Round-trip a VersionUpdateDescriptor model through dict conversion."""
        timestamp = '2020-01-28T18:40:40.123456Z'

        # Dict forms of the nested models the descriptor embeds.
        resource_model = {'type': 'mem', 'value': {'foo': 'bar'}}
        state_model = {
            'current': 'testString',
            'current_entered': timestamp,
            'pending': 'testString',
            'pending_requested': timestamp,
            'previous': 'testString',
        }

        # Dict form of the descriptor itself, as one literal.
        source_json = {
            'version_locator': 'testString',
            'version': 'testString',
            'state': state_model,
            'required_resources': [resource_model],
            'package_version': 'testString',
            'can_update': True,
            'messages': {'foo': 'bar'},
        }

        # Deserialize and make sure a real instance came back.
        model = VersionUpdateDescriptor.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict
        # and verify the two compare equal.
        twin = VersionUpdateDescriptor(**VersionUpdateDescriptor.from_dict(source_json).__dict__)
        assert model == twin

        # Serializing back must reproduce the original dict exactly.
        assert model.to_dict() == source_json
# endregion
##############################################################################
# End of Model Tests
##############################################################################
| 57.206759
| 4,075
| 0.607995
| 53,788
| 531,508
| 5.695917
| 0.011899
| 0.020886
| 0.014074
| 0.016633
| 0.9297
| 0.892562
| 0.847336
| 0.796244
| 0.771369
| 0.753068
| 0
| 0.021762
| 0.188007
| 531,508
| 9,290
| 4,076
| 57.212917
| 0.688121
| 0.198232
| 0
| 0.681535
| 0
| 0.014573
| 0.405758
| 0.074365
| 0
| 0
| 0
| 0
| 0.10805
| 1
| 0.047094
| false
| 0.001599
| 0.004621
| 0
| 0.095966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7292c0a56b03ba9c7772ad4e5ac6ff128912a8da
| 2,444
|
py
|
Python
|
Estrutura_de_decisao/Exercicio_12.py
|
Guga-crypto/Exercicios_Iniciante
|
e9bdf352aacf3a67b0cc879ac969df486d01f86d
|
[
"MIT"
] | null | null | null |
Estrutura_de_decisao/Exercicio_12.py
|
Guga-crypto/Exercicios_Iniciante
|
e9bdf352aacf3a67b0cc879ac969df486d01f86d
|
[
"MIT"
] | null | null | null |
Estrutura_de_decisao/Exercicio_12.py
|
Guga-crypto/Exercicios_Iniciante
|
e9bdf352aacf3a67b0cc879ac969df486d01f86d
|
[
"MIT"
] | null | null | null |
def _exibir_contracheque(salario_hora, hora_trab, salario_bruto, ir_pct):
    """Print the pay-slip breakdown for a gross salary taxed at ir_pct% IR.

    INSS is always 10% of the gross salary and FGTS 11%; FGTS is displayed
    but not deducted (it is an employer-side deposit, not a discount).
    """
    print(f"Salário Bruto: ({salario_hora} * {hora_trab})\t\t: R$ {salario_bruto}")
    ir = (salario_bruto * ir_pct) / 100
    print(f'(-) IR ({ir_pct}%)\t\t\t\t : R$ {ir}')
    inss = (salario_bruto * 10) / 100
    print(f'(-) INSS (10%)\t\t\t\t : R$ {inss}')
    fgts = (salario_bruto * 11) / 100
    print(f'FGTS (11%)\t\t\t\t : R$ {fgts}')
    descontos = inss + ir
    print(f'Total de descontos\t\t\t : R$ {descontos}')
    print(f'Salário liquido\t\t\t\t : R$ {salario_bruto - descontos}')


# Read the hourly wage and hours worked, then print the pay slip for the
# matching income-tax bracket.
salario_hora = float(input('Qual o valor da hora trabalhada?\n'))
hora_trab = int(input('Quantas horas trabalhou no mês?\n'))
salario_bruto = hora_trab * salario_hora

if salario_bruto < 900:
    # Below R$900 the income tax is waived, so the only discount is INSS.
    print(f"Salário Bruto: ({salario_hora} * {hora_trab})\t\t: R$ {salario_bruto}")
    print('(-) IR (isento)\t\t\t\t : R$ 0,0')
    inss = (salario_bruto * 10) / 100
    print(f'(-) INSS (10%)\t\t\t\t : R$ {inss}')
    fgts = (salario_bruto * 11) / 100
    print(f'FGTS (11%)\t\t\t\t : R$ {fgts}')
    print(f'Total de descontos\t\t\t : R$ {inss}')
    print(f'Salário liquido\t\t\t\t : R$ {salario_bruto - inss}')
elif 900 <= salario_bruto < 1500:
    # BUGFIX: the original printed a stray ')' after the gross salary in
    # this branch and the next one; the shared helper emits the correct line.
    _exibir_contracheque(salario_hora, hora_trab, salario_bruto, 5)
elif 1500 <= salario_bruto < 2500:
    _exibir_contracheque(salario_hora, hora_trab, salario_bruto, 10)
elif salario_bruto >= 2500:
    _exibir_contracheque(salario_hora, hora_trab, salario_bruto, 20)
else:
    # Unreachable for ordinary numbers; only hit for non-comparable input
    # such as float('nan').
    print('Entradas inválidas.')
| 45.259259
| 84
| 0.579787
| 389
| 2,444
| 3.529563
| 0.115681
| 0.0874
| 0.07866
| 0.058267
| 0.808449
| 0.801894
| 0.792425
| 0.792425
| 0.792425
| 0.773489
| 0
| 0.052298
| 0.22545
| 2,444
| 53
| 85
| 46.113208
| 0.673006
| 0
| 0
| 0.647059
| 0
| 0
| 0.477087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.490196
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
72d2add59bbf0ed0044520d0a45a59739008a9c7
| 103
|
py
|
Python
|
synthesis/__init__.py
|
sahilk15/synthetic_data_generation
|
06676d15821fc5ce27464a46d294fd3bb484ecfa
|
[
"MIT"
] | 4
|
2021-02-16T00:38:27.000Z
|
2022-01-21T21:59:28.000Z
|
synthesis/__init__.py
|
Daan0/synthetic_data_generation
|
5a0d1818cba2bc8b629869773a2f86a156d25fd9
|
[
"MIT"
] | 2
|
2021-08-20T14:32:59.000Z
|
2022-03-18T10:15:40.000Z
|
synthesis/__init__.py
|
Daan0/synthetic_data_generation
|
5a0d1818cba2bc8b629869773a2f86a156d25fd9
|
[
"MIT"
] | 5
|
2020-12-08T05:01:39.000Z
|
2022-03-24T18:12:45.000Z
|
from synthesis import synthesizers
from synthesis import transformers
from synthesis import evaluation
| 25.75
| 34
| 0.883495
| 12
| 103
| 7.583333
| 0.5
| 0.428571
| 0.626374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116505
| 103
| 3
| 35
| 34.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
72f94660d74110557a4db99f605fa00e1048d7bd
| 2,708
|
py
|
Python
|
users_api/migrations/0002_auto_20220226_0659.py
|
gonulalan/budi-food-api
|
2120c03f3985000cd41f0371d4138cd2fa0223cf
|
[
"MIT"
] | null | null | null |
users_api/migrations/0002_auto_20220226_0659.py
|
gonulalan/budi-food-api
|
2120c03f3985000cd41f0371d4138cd2fa0223cf
|
[
"MIT"
] | null | null | null |
users_api/migrations/0002_auto_20220226_0659.py
|
gonulalan/budi-food-api
|
2120c03f3985000cd41f0371d4138cd2fa0223cf
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.12 on 2022-02-26 06:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add company, identity, and delivery-platform credential columns to UserProfile."""

    dependencies = [
        ('users_api', '0001_initial'),
    ]

    # Every new column is an optional CharField differing only in name and
    # maximum length, so the AddField operations are generated from a table.
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name=column,
            field=models.CharField(blank=True, default=None, max_length=size, null=True),
        )
        for column, size in [
            ('company_address', 500),
            ('company_official_name', 255),
            ('company_sign_name', 255),
            ('company_tax_administration', 255),
            ('company_tax_id', 20),
            ('getiryemek_api_key', 255),
            ('getiryemek_api_secret', 255),
            ('identity_id', 20),
            ('trendyolyemek_api_key', 255),
            ('trendyolyemek_api_secret', 255),
            ('yemeksepeti_api_key', 255),
            ('yemeksepeti_api_secret', 255),
        ]
    ]
| 36.594595
| 88
| 0.598227
| 278
| 2,708
| 5.654676
| 0.194245
| 0.137405
| 0.175573
| 0.206107
| 0.881043
| 0.881043
| 0.881043
| 0.849873
| 0.849873
| 0.782443
| 0
| 0.027921
| 0.28582
| 2,708
| 73
| 89
| 37.09589
| 0.784902
| 0.016987
| 0
| 0.701493
| 1
| 0
| 0.143609
| 0.050752
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014925
| 0
| 0.059701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f43e94cc0186dcfafa2ac55ed75d38c998a8c86d
| 5,493
|
py
|
Python
|
test/test_suda.py
|
scottbw/suda
|
f89cd18055a3b941c1c00fda7b23a549516ee86f
|
[
"MIT"
] | null | null | null |
test/test_suda.py
|
scottbw/suda
|
f89cd18055a3b941c1c00fda7b23a549516ee86f
|
[
"MIT"
] | null | null | null |
test/test_suda.py
|
scottbw/suda
|
f89cd18055a3b941c1c00fda7b23a549516ee86f
|
[
"MIT"
] | null | null | null |
import random
import pandas as pd
import pytest
from suda import suda, find_msu
@pytest.fixture
def data():
    """A small survey-style DataFrame shared by the MSU/SUDA tests."""
    columns = ['gender', 'region', 'education', 'labourstatus']
    rows = [
        ('female', 'urban', 'secondary incomplete', 'employed'),
        ('female', 'urban', 'secondary incomplete', 'employed'),
        ('female', 'urban', 'primary incomplete', 'non-LF'),
        ('male', 'urban', 'secondary complete', 'employed'),
        ('female', 'rural', 'secondary complete', 'unemployed'),
        ('male', 'urban', 'secondary complete', 'employed'),
        ('female', 'urban', 'primary complete', 'non-LF'),
        ('male', 'urban', 'post-secondary', 'unemployed'),
        ('female', 'urban', 'secondary incomplete', 'non-LF'),
        ('female', 'urban', 'secondary incomplete', 'non-LF'),
        ('female', 'urban', 'secondary complete', 'non-LF'),
    ]
    return pd.DataFrame(rows, columns=columns)
@pytest.fixture
def large_data():
    """Load the larger CSV fixture used by the (disabled) performance run."""
    frame = pd.read_csv('test_data.csv')
    return frame
# def test_performance(large_data):
# suda(large_data, 4)
def test_msu(data):
    """find_msu flags only the single rural record (row 4) with an MSU of 2."""
    aggregations = {'msu': 'min', 'suda': 'sum', 'fK': 'min', 'fM': 'sum'}
    for column in data.columns:
        aggregations[column] = 'max'
    results = find_msu(data, groups=[['gender', 'region']], aggregations=aggregations, att=4)
    results = results.fillna(0)
    # Only row 4 is a minimal sample unique; every other row scores 0.
    expected_msu = {4: 2}
    for row in range(11):
        assert results.loc[row].msu == expected_msu.get(row, 0)
def test_suda(data):
    """suda assigns the expected MSU and SUDA score to every record."""
    results = suda(data, max_msu=3)
    print(results)
    expected_msu = [0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 2]
    expected_suda = [0, 0, 15, 0, 20, 0, 15, 20, 0, 0, 5]
    for row, (msu, score) in enumerate(zip(expected_msu, expected_suda)):
        assert results.loc[row].msu == msu
        assert results.loc[row].suda == score
def test_suda_with_columns(data):
    """Restricting SUDA to a column subset still returns the excluded columns."""
    results = suda(data, max_msu=2, columns=['gender', 'region', 'education'])
    # Columns left out of the SUDA calculation must survive in the output.
    assert results.loc[0].labourstatus == 'employed'
    expected_msu = [0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0]
    expected_suda = [0, 0, 4, 0, 4, 0, 4, 4, 0, 0, 0]
    for row in range(11):
        assert results.loc[row].msu == expected_msu[row]
        assert results.loc[row].suda == expected_suda[row]
def test_suda_no_uniques():
    """With every record identical there are no special uniques, so suda
    should return the input frame unchanged."""
    record = {'gender': 'female', 'region': 'urban',
              'education': 'secondary incomplete', 'labourstatus': 'employed'}
    # Ten independent copies of the same record: no row can be unique.
    persons = [dict(record) for _ in range(10)]
    df = pd.DataFrame(persons)
    results = suda(df, max_msu=3)
    assert results.equals(df)
| 43.595238
| 226
| 0.610231
| 672
| 5,493
| 4.959821
| 0.120536
| 0.222322
| 0.268827
| 0.178518
| 0.780678
| 0.753675
| 0.732673
| 0.732673
| 0.705071
| 0.705071
| 0
| 0.028037
| 0.181868
| 5,493
| 126
| 227
| 43.595238
| 0.713618
| 0.020936
| 0
| 0.537736
| 0
| 0
| 0.290233
| 0
| 0
| 0
| 0
| 0
| 0.537736
| 1
| 0.056604
| false
| 0
| 0.037736
| 0.009434
| 0.113208
| 0.009434
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
bec7d767b8dc8231986a5aeb3cc5e4c6af6c1b30
| 59,016
|
py
|
Python
|
roberta/StaticData.py
|
sroopsai/robertalab-ev3dev
|
7046991247f0e5c598246445e9f65817f432983e
|
[
"Apache-2.0"
] | null | null | null |
roberta/StaticData.py
|
sroopsai/robertalab-ev3dev
|
7046991247f0e5c598246445e9f65817f432983e
|
[
"Apache-2.0"
] | null | null | null |
roberta/StaticData.py
|
sroopsai/robertalab-ev3dev
|
7046991247f0e5c598246445e9f65817f432983e
|
[
"Apache-2.0"
] | 1
|
2021-01-28T14:55:15.000Z
|
2021-01-28T14:55:15.000Z
|
IMAGES = {
'oldglasses':
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x80\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x1f\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x00\x00\x00\x00\x00\x00\xc0\xff\xff\xff\xff\x1f\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\x01\x00\x00\x00\x00\x00\xe0\xff\xff\xff\xff\x3f\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xff\xff\x03\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\xff\x07\xfc\xff\xff\x0f\x00\x00\x00\x00\x00\xfc\xff\xff\xff\xff\xff\x03\x00\x00\x00\x00\x80\xff\x01\xf0\xff\xff\x1f\x00\x00\x00\x00\x00\xfe\x3f\xf0\xff\xff\xff\x07\x00\x00\x00\x00\xe0\xff\x00\xf0\xff\xff\x3f\x00\x00\x00\x00\x00\xff\x0f\xc0\xff\xff\xff\x0f\x00\x00\x00\x00\xf0\x3f\x00\xe0\xff\xff\x7f\x00\x00\x00\x00\x80\xff\x03\xc0\xff\xff\xff\x1f\x00\x00\x00\x00\xf8\x1f\x00\xe0\xff\xff\xff\x00\x00\x00\x00\xc0\xff\x00\x80\xff\xff\xff\x7f\x00\x00\x00\x00\xfc\x0f\x00\xe0\xff\xff\xff\x01\x00\x00\x00\xe0\x7f\x00\x80\xff\xff\xff\x7f\x00\x00\x00\x00\xfe\x07\x00\xe0\xff\xff\xff\x03\x00\x00\x00\xf0\x3f\x00\x80\xff\xff\xff\xff\x00\x00\x00\x00\xff\x03\x00\xf0\xff\xff\xff\x07\x00\x00\x00\xf8\x1f\x00\x80\xff\xff\xff\xff\x01\x00\x00\x80\xff\x01\x00\xf0\xff\xff\xff\x0f\x00\x00\x00\xf8\x0f\x00\xc0\xff\xff\xff\xff\x03\x00\x00\xc0\xff\x00\x00\xfc\xff\xff\xff\x0f\x00\x00\x00\xfc\x07\x00\xc0\xff\xff\xff\xff\x03\x00\x00\xc0\xff\x00\x80\xff\xff\xff\xff\x1f\x00\x00\x00\xfe\x03\x00\xf0\xff\xff\xff\xff\x03\x
00\x00\xc0\x7f\x00\xc0\xff\xff\xff\xff\x3f\x00\x00\x00\xfe\x01\x00\xfc\xff\xff\xff\xff\x07\x00\x00\xe0\x3f\x00\xe0\xff\xff\xff\xff\x3f\x00\x00\x00\xff\x01\x00\xff\xff\xff\xff\xff\x0f\x00\x00\xf0\x3f\x00\xf0\xff\xff\xff\xff\x7f\x00\x00\x00\xff\x00\x80\xff\xff\xff\xff\xff\x0f\x00\x00\xf0\x1f\x00\xf8\xff\xff\xff\xff\x7f\x00\x00\x80\xff\x00\xc0\xff\xff\xff\xff\xff\x1f\x00\x00\xf8\x1f\x00\xfc\xff\xff\xff\xff\xff\x00\x00\x80\x7f\x00\xe0\xff\xff\xff\xff\xff\x1f\x00\x00\xf8\x0f\x00\xfe\xff\xff\xff\xff\xff\x00\x00\xc0\x7f\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x0f\x00\xff\xff\xff\xff\xff\xff\x01\x00\xc0\x3f\x00\xf8\xff\xff\xff\xff\xff\x1f\x00\x00\xfc\x07\x00\xff\xff\xff\xff\xff\xff\x01\x00\xc0\x3f\x00\xfc\xff\xff\xff\xff\xff\x3f\x00\x00\xfc\x07\x80\xff\xff\xff\xff\xff\xff\x01\x00\xe0\x1f\x00\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\xc0\xff\xff\xff\xff\xff\xff\x03\x00\xe0\x1f\x00\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\x03\xc0\xff\xff\xff\xff\xff\xff\x03\x00\xe0\x1f\x00\xff\xff\xff\xff\xff\xff\x7f\x00\x00\xfe\x03\xe0\xff\xff\xff\xff\xff\xff\x03\x00\xe0\x1f\x00\xff\xff\xff\xff\xff\xff\x7f\x80\x01\xfe\x03\xe0\xff\xff\xff\xff\xff\xff\x03\x00\xe0\x1f\x80\xff\xff\xff\xff\xff\xff\xff\xf0\x0f\xff\x03\xe0\xff\xff\xff\xff\xff\xff\x03\x00\xf0\x1f\x80\xff\xff\xff\xff\xff\xff\xff\xf8\x1f\xff\x03\xf0\xff\xff\xff\xff\xff\xff\x07\x00\xf0\x3f\xc0\xff\xff\xff\xff\xff\xff\xff\x3e\x7c\xff\x07\xf0\xff\xff\xff\xff\xff\xff\x07\x00\xf0\x3f\xc0\xff\xff\xff\xff\xff\xff\xff\x1f\xf8\xff\x07\xf8\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xf0\xff\xff\xff\xff\xff\xff\xff\x0f\xf0\xff\x1f\xfe\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe3\xc7\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfb\xdf\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x1f\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xff\xff\x
ff\xff\xff\xff\xff\xff\x07\xe0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x03\xc0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\x00\xe0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\x03\x00\xe0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\x03\x00\xe0\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\x03\x00\xe0\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\x03\x00\xe0\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x00\x00\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x03\x00\xc0\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x00\x00\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\xc0\xff\xff\xff\xff\xff\xff\xff\xff\x3f\x00\x00\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\xc0\xff\xff\xff\xff\xff\xff\xff\xff\x3f\x00\x00\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\x80\xff\xff\xff\xff\xff\xff\xff\xff\x1f\x00\x00\xfc\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x80\xff\xff\xff\xff\xff\xff\xff\xff\x1f\x00\x00\xfc\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x0f\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\x7f\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x0f\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\x7f\x00\x00\x00\xfe\xff\xff\xff\xff\xff\xff\xff\x07\x00\x00\xf0\xff\xff\xff\xff\xff\xff\xff\x3f\x00\x00\x00\xfe\xff\xff\xff\xff\xff\xff\xff\x07\x00\x00\xf0\xff\xff\xff\xff\xff\xff\xff\x3f\x00\x00\x00\xfc\xff\xff\xff\xff\xff\xff\xff\x03\x00\x00\xe0\xff\xff\xff\xff\xff\xff\xff\x1f\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\x01\x00\x00\xc0\xff\xff\xff\xff\xff\xff\xff\x0f\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\x01\x00\x00\xc0\xff\xff\xff\xff\xff\xff\xff\x0f\x00\x00\x00\xf0\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x80\xff\xff\xff\xff\xff\xff\xff\x07\x
00\x00\x00\xe0\xff\xff\xff\xff\xff\xff\x7f\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\x03\x00\x00\x00\xc0\xff\xff\xff\xff\xff\xff\x3f\x00\x00\x00\x00\xfe\xff\xff\xff\xff\xff\xff\x01\x00\x00\x00\x80\xff\xff\xff\xff\xff\xff\x1f\x00\x00\x00\x00\xfc\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\x0f\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\x7f\x00\x00\x00\x00\x00\xfe\xff\xff\xff\xff\xff\x07\x00\x00\x00\x00\xf0\xff\xff\xff\xff\xff\x3f\x00\x00\x00\x00\x00\xfc\xff\xff\xff\xff\xff\x03\x00\x00\x00\x00\xe0\xff\xff\xff\xff\xff\x1f\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\x01\x00\x00\x00\x00\xc0\xff\xff\xff\xff\xff\x0f\x00\x00\x00\x00\x00\xe0\xff\xff\xff\xff\x7f\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\x03\x00\x00\x00\x00\x00\xc0\xff\xff\xff\xff\x3f\x00\x00\x00\x00\x00\x00\xfe\xff\xff\xff\xff\x01\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x0f\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\x7f\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xff\x03\x00\x00\x00\x00\x00\x00\xe0\xff\xff\xff\x1f\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
'eyesopen':
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcc\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\xcc\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x03\x00\x00\x00\xcc\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\xff\x00\x00\x00\x8c\x37\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\xff\x1f\x00\x00\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\x7f\x00\x00\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\xff\x00\x00\x0c\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x06\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x06\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x3f\x00\x00\x06\x06\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x06\x07\x00\x00\x00\x00\x80\x00\x00\x00\xfc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x03\x00\x00\x00\x00\xc0\x01\x00\x00\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x03\x00\x00\x00\x00\xc0\x03\x00\xc0\x3f\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x83\x01\x00\x00\x00\x00\x60\x03\x00\xf0\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x83\x01\x00\x00\x00\x00\x60\x03\x00\xf8\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc3\x00\x00\x00\x00\x00\x20\x03\x00\xfc\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc3\x18\x00\x00\x00\x18\x30\x03\x00\xfe\x00\x00\x00\x00\x00\x00\x00\xf8\x1f\x00\x00\x00\x00\xc3\x3f\x00\x00\x00\xfc\x30\x03\x80\x7f\x00\x00\x00\x00\x00\x00\x80\xff\xff\x01\x00\x00\x00\x83\x1f\x00\x00\x00\xee\x3b\x03\xc0\x3f\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x07\x00\x00\x80\x01\x0e\x00\x00\x00\x86\x1f\x03\xe0\x1f\x00\x00\x00\x00\x
00\x00\xf8\x07\xe0\x1f\x00\x00\x80\x01\x07\x00\x00\x00\x06\x1e\x03\xf0\x0f\x00\x00\xfe\x07\x00\x00\xfc\x00\x00\x3f\x00\x00\x80\x81\x03\x00\x00\x00\x0c\x00\x03\xf8\x07\x00\xe0\xff\x7f\x00\x00\x3f\x00\x00\xfc\x00\xc0\x9f\xc1\x01\x00\x00\x00\x0c\x00\x03\xf8\x07\x00\xfc\xff\xff\x03\x80\x0f\x00\x00\xf0\x81\xff\x9f\xe1\x00\x00\x00\x00\x18\x00\x03\xe0\x03\x00\xff\x01\xf8\x0f\xc0\x07\x00\x00\xe0\xff\xff\x9f\x71\x00\x00\x00\x00\x18\x00\x03\xc0\x03\x80\x1f\x00\x80\x1f\xe0\x01\x00\x00\x80\xff\xff\x9f\x3b\x00\x00\x00\x00\x30\x00\x03\xc0\x01\xe0\x07\x00\x00\x7e\xf0\x00\x00\x00\x00\xff\xff\x1f\x1f\x00\x00\x00\x00\x60\x00\x03\x00\x00\xf0\x01\x00\x00\xf8\x70\x00\x00\x00\x00\xfe\xff\x1f\x0c\x00\x00\x00\x00\x60\x00\x03\x00\x00\xf8\x00\x00\x00\xf0\x79\x00\x00\x00\x00\xfe\xff\x3f\x00\x00\x00\x00\x00\xc0\x00\x03\x00\x00\x3c\x00\x00\x00\xc0\x3f\x00\x00\x00\x00\xfc\xff\x3f\x00\x00\x00\x00\x00\x80\x01\x03\x00\x00\x1e\x00\x00\x00\x80\x1f\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x00\x00\x00\x80\x03\x03\x00\x00\x0f\x00\x00\x00\x00\x1f\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x00\x00\x00\x00\x03\x03\x00\x80\x07\x00\x00\x00\x00\x0e\x00\x00\x00\x00\xf0\xff\x3f\x00\x00\x00\x00\x00\x0e\x06\x03\x00\x80\x03\x00\x00\x00\x00\x0f\x00\x00\x00\x00\xf0\xff\x3f\x00\x00\x00\x00\x00\x7e\x0c\x03\x00\xc0\x03\x00\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xff\x3f\x00\x00\x00\x00\x00\xf6\r\x03\x00\xe0\x01\x00\x00\x00\x00\x07\x00\x00\x1f\x00\xe0\xff\x7f\x00\x00\x00\x00\x00\xc6\x0f\x03\x00\xe0\x00\x00\x00\x00\x00\x07\x00\xc0\x7f\x00\xe0\xff\x7f\x00\x00\x00\x00\x00\x0e\x06\x03\x00\xf0\x00\x00\x00\x00\x80\x03\x00\xe0\xc3\x00\xc0\xff\x7f\x00\x00\x00\x00\x00\x0c\x00\x07\x00\x70\x00\x00\x00\x00\x80\x03\x00\xf0\x81\x01\xc0\xff\x7f\x00\x00\x00\x00\x00\x18\x00\x07\x00\x70\x00\x00\x00\x00\x80\x03\x00\xf0\x81\x01\xc0\xff\x7f\x00\x00\x00\x00\x00\x18\x00\x06\x00\x38\x00\x00\x00\x3e\x80\x03\x00\xf8\x81\x03\xc0\xff\x7f\x00\x00\x00\x00\x00\x30\x00\x06\x00\x38\x00\x00\x80\xff\x80\x03\x00\xf8\x81\x03\xc0\xff\x7f\x00\x00\x00\x00\x00\x30\x00\x06\x00
\x38\x00\x00\xe0\xff\x83\x03\x00\xf8\xc3\x03\xc0\xff\x07\x00\x00\x00\x00\x00\x60\x00\x06\x00\x3f\x00\x00\xe0\x0f\x83\x03\x00\xf8\xff\x03\xc0\x3f\x00\x00\x00\x00\x00\x00\xc0\x00\x06\xf0\x1f\x00\x00\xf0\x07\x86\x03\x00\xf8\xff\x03\xc0\x01\x00\x00\x00\x00\x00\x00\xc0\x01\x06\xff\x1f\x00\x00\xf0\x07\x86\x03\x00\xf0\xff\x01\xc0\x01\x00\x00\x00\x00\x00\x00\x80\x01\xfe\xff\x1f\x00\x00\xf8\x07\x8e\x03\x00\xf0\xff\x01\xc0\x01\x00\x00\x00\x00\x00\x00\x00\x03\xfe\xff\x1f\x00\x00\xf8\x07\x0e\x07\x00\xe0\xff\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x07\xfe\xff\x1f\x00\x00\xf8\x0f\x0f\x07\x00\xc0\x7f\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x06\xff\xff\x1f\x00\x00\xf8\xff\x0f\x07\x00\x00\x1f\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x0e\xff\xff\x1f\x00\x00\xf8\xff\x0f\x0f\x00\x00\x00\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x3c\xfb\xff\x1f\x00\x00\xf0\xff\x07\x0e\x00\x00\x00\x00\x70\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xfb\xff\x1f\x00\x00\xf0\xff\x07\x1e\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xf9\xff\x1f\x00\x00\xe0\xff\x03\x1c\x00\x00\x00\x00\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\xe0\xff\x03\x3c\x00\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x80\xff\x00\x78\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x3f\x00\x00\x00\x3e\x00\x70\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x3f\x00\x00\x00\x00\x00\xf0\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x7f\x00\x00\x00\x00\x00\xe0\x01\x00\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x7f\x00\x00\x00\x00\x00\xe0\x07\x00\x00\xe0\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\xf0\x0f\x00\x00\xf0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\xf0\x3f\x00\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x01\x00\x00\x00\x00\xf8\xff\x00\x00\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x03\x00\x00\x00\x00\xfc\xff\x07\xe0\x1f\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\xc0\xff\xc3\x03\x00\x00\x00\x00\xfc\xff\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x1f\x80\x07\x00\x00\x00\x00\xfe\xff\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x01\x00\x0f\x00\x00\x00\x00\xff\xff\xfb\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3c\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\xf0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x01\x00\x00\xf8\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x07\x00\x00\xfe\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x1f\x00\x80\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x01\xf8\xef\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xf3\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\x7f\xf0\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x07\xf0\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x0f\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
'eyesclosed':
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x0f\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\xf0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x01\x00\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\x01\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x01\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x01\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\x0f\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x70\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\xf8\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\xf8\x07\xe0\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x07\x00\x00\xfc\x00\x00\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\x7f\x00\x00\x3f\x00\x00\xfc\x00\xc0\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\x03\x80\x0f\x00\x00\xf0\x81\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x01\xf8\x0f\xc0\x07\x00\x00\xe0\xff\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x1f\x00\x80\x1f\xe0\x01\x00\x00\x80\xff\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x07\x00\x00\x7e\xf0\x00\x00\x00\x00\xff\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x01\x00\x00\xf8\x70\x00\x00\x00\x00\xfe\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\xf0\x79\x00\x00\x00\x00\xfe\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3c\x00\x00\x00\xc0\x3f\x00\x00\x00\x00\xfc\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x80\x1f\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x1f\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x07\x00\x00\x00\x00\x0e\x00\x00\x00\x00\xf0\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\x00\x00\x00\x00\x0f\x00\x00\x00\x00\xf0\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x03\x00\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xff\x3f\x00\x00\x3c\x00\x00\x00\x00\x00\x00\xe0\x01\x00\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xff\x7f\x00\xe0\xff\x00\x00\x00\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xff\x7f\x00\xfe\xc3\x00\x00\x00\x00\x00\x00\xf0\x00\x00\x00\x00\x80\x03\x00\x00\x00\x00\xc0\xff\x7f\xf8\x1f\x80\x01\x00\x00\x00\x00\x00\x70\x00\x00\x00\x00\x80\x03\x00\x00\x00\xfc\xc3\xff\x7f\xf8\x01\xc0\x00\x00\x00\x00\x00\x00\x70\x00\x00\x00\x00\x80\x03\x00\x00\x80\xff\xc7\xff\x7f\x0c\x00\x60\x00\x00\x00\x00\x00\x00\x38\x00\x00\x00\x3e\x80\x03\x00\x00\xfc\xff\xcf\xff\x7f\x0c\x00\x70\x00\x00\x00\x00\x00\x00\x38\x00\x00\x80\xff\x80\x03\x00\xc0\xff\xff\xcf\xff\x7f\x0c\x00\x38\x00\x00\x00\x00\x00\x
00\x38\x00\x00\xe0\xff\x83\x03\x00\xfe\xff\xff\xc7\xff\x07\x06\x00\x1c\x00\x00\x00\x00\x00\x00\x3f\x00\x00\xe0\x0f\x83\x03\xc0\xff\xff\xff\xc1\x3f\x00\x06\x00\x0e\x00\x00\x00\x00\x00\xf0\x1f\x00\x00\xf0\x07\x86\x03\xfe\xff\xff\x03\xc0\x01\x00\x1e\x00\xfe\x03\x00\x00\x00\x00\xff\x1f\x00\x00\xf0\x07\x86\x83\xff\x7f\x00\x00\xc0\x01\x00\x3c\x00\xfe\x03\x00\x00\x00\xf8\xff\x1f\x00\x00\xf8\x07\x8e\xc3\x7f\x00\x00\x00\xc0\x01\x00\xf0\x00\x00\x00\x00\x00\x00\xfc\xff\x1f\x00\x00\xf8\x07\x0e\xc7\x03\x00\x00\x00\xe0\x00\x00\xc0\x01\x00\x00\x00\x00\x00\xfc\xff\x1f\x00\x00\xf8\x0f\x0f\x07\x00\x00\x00\x00\xe0\x00\x00\x80\x07\x00\x00\x00\x00\x00\xfc\xff\x1f\x00\x00\xf8\xff\x0f\x07\x00\x00\x00\x00\xe0\x00\x00\x00\x0e\x00\x00\x00\x00\x00\xfc\xff\x1f\x00\x00\xf8\xff\x0f\x0f\x00\x00\x00\x00\xf0\x00\x00\x00\x3c\x00\x00\x00\x00\x00\xf8\xff\x1f\x00\x00\xf0\xff\x07\x0e\x00\x00\x00\x00\x70\x00\x00\x00\xf0\x00\x00\x00\x00\x00\xf8\xff\x1f\x00\x00\xf0\xff\x07\x1e\x00\x00\x00\x00\x78\x00\x00\x00\xe0\x01\x00\x00\x00\x00\xf8\xff\x1f\x00\x00\xe0\xff\x03\x1c\x00\x00\x00\x00\x38\x00\x00\x00\x80\x07\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\xe0\xff\x03\x3c\x00\x00\x00\x00\x3c\x00\x00\x00\x00\x0f\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x80\xff\x00\x78\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x3c\x00\x00\x00\x00\xf0\xff\x3f\x00\x00\x00\x3e\x00\x70\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x78\x00\x00\x00\x00\xf0\xff\x3f\x00\x00\x00\x00\x00\xf0\x00\x00\x00\x00\x0f\x00\x00\x00\x00\xe0\x01\x00\x00\x00\xf0\xff\x7f\x00\x00\x00\x00\x00\xe0\x01\x00\x00\x80\x07\x00\x00\x00\x00\x80\x03\x00\x00\x00\xf0\xff\x7f\x00\x00\x00\x00\x00\xe0\x07\x00\x00\xe0\x03\x00\x00\x00\x00\x00\x03\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\xf0\x0f\x00\x00\xf0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\xf0\x3f\x00\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x01\x00\x00\x00\x00\xf8\xff\x00\x00\x3f\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x07\xe0\xff\xff\x03\x00\x00\x00\x00\xfc\xff\x07\xe0\x1f\x00\x00\x00\x00\x00\x
00\x00\x00\xfe\xff\xc1\xff\xc3\x03\x00\x00\x00\x00\xfc\xff\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x0e\xfc\xc3\x1f\x80\x07\x00\x00\x00\x00\xfe\xff\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x00\xc3\x01\x00\x0f\x00\x00\x00\x00\xff\xff\xfb\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x03\x00\x00\x1e\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x00\x03\x00\x00\x3c\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x03\x00\x00\xf8\x00\x00\x00\xf0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x80\x01\x00\x00\xf0\x01\x00\x00\xf8\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x70\x80\x01\x00\x00\xe0\x07\x00\x00\xfe\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3c\xc0\x00\x00\x00\x80\x1f\x00\x80\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xc0\x00\x00\x00\x00\xff\x01\xf8\xef\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x03\xe0\x00\x00\x00\x00\xfc\xff\xff\xf3\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x70\x00\x00\x00\x00\xe0\xff\x7f\xf0\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3e\x00\x38\x00\x00\x00\x00\x00\xfe\x07\xf0\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x1c\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x01\x0e\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x07\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x83\x03\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xc1\x01\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xe0\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x70\x70\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x3c\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x0e\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x0f\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x0e\x07\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x87\x03\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc3\x01\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe1\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
'flowers':
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x80\xff\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x0f\x00\x00\x00\x00\x00\x00\x00\xc0\x03\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\x1f\x00\x00\x00\x00\x00\x00\x00\xf0\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x38\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\x7f\x00\x00\x00\x00\x00\x00\x00\x18\x00\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x80\x01\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x01\x00\x00\x00\x00\x00\x00\x0e\x00\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x01\x00\x00\x00\x00\x00\x00\x06\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\xff\xff\x03\x00\x00\x00\x00\x00\x00\x06\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\xff\xff\x03\x00\x00\x00\x00\x00\x00\x03\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\xff\xff\x07\x00\x00\x00\x00\x00\x00\x03\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\xff\xff\x07\x00\x00\x00\x00\x00\x00\x03\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\xff\xff\x07\x00\x00\x00\x00\xfc\x01\x03\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\xff\xff\x0f\x00\x00\x00\x80\x07\x0e\x03\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\xff\xff\x0f\x00\x00\x00\xc0\x00\x10\x
03\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\xff\xff\x0f\x00\x00\x00\x70\x00\x60\x03\x00\x00\x06\x00\x00\x00\x00\x00\xf8\x0f\xe0\xff\xff\xff\xff\x0f\x00\x00\x00\x38\x00\xc0\x06\x00\x00\x03\x00\x00\x00\x00\x00\xff\x7f\xe0\xff\xff\xff\xff\x0f\x00\x00\x00\x18\x00\x80\x06\x00\x00\x03\x00\x00\x00\x00\xc0\xff\xff\xe1\xff\xff\xff\xff\x0f\x00\x00\x00\x0c\x00\x00\x0f\x00\x80\x03\x00\x00\x00\x00\xf0\xff\xff\xe7\xff\xff\xff\xff\x0f\x00\x00\x00\x06\x00\x00\xee\x1f\x80\xe1\x0f\x00\x00\x00\xf8\xff\xff\xef\xff\xff\xff\xff\x0f\x00\x00\x00\x06\x00\x00\xfe\xff\xc0\xfc\x7f\x00\x00\x00\xfc\xff\xff\xff\xff\xff\xff\xff\x0f\x00\x00\x00\x06\x00\x00\xff\xff\xc3\x1e\xf0\x00\x00\x00\xfe\xff\xff\xff\xff\xff\xff\xff\x07\x00\x00\x00\x03\x00\x80\xff\xff\xef\x07\xc0\x03\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x07\xff\x01\x00\x03\x00\xc0\xff\xff\xff\x01\x00\x07\x00\x80\xff\xff\xff\xff\xff\xff\xff\xff\xe7\xff\x0f\x00\x03\x00\xe0\xff\xff\xff\x00\x00\x06\x00\x80\xff\xff\xff\xff\xff\xff\xff\xff\xfb\xff\x3f\x00\x03\x00\xf0\xff\xff\x7f\x00\x00\x0c\x00\xc0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x03\x00\xf8\xff\xff\x7f\x00\x00\x1c\x00\xc0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x03\x00\xf8\xff\xff\x7f\x00\x00\x18\x00\xe0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x03\x07\x00\xfc\xff\xff\xff\x00\x00\x18\x00\xe0\xff\xff\xff\xff\xff\x00\xfe\xff\xff\xff\xff\x07\x06\x00\xfc\xff\xff\xff\x00\x00\x30\x00\xe0\xff\xff\xff\xff\x1f\x00\xf0\xff\xff\xff\xff\x0f\x06\x00\xfc\xff\xff\xff\x00\x00\x30\x00\xf0\xff\xff\xff\xff\x07\x00\xc0\xff\xff\xff\xff\x1f\x0e\x00\xfe\xff\xff\xff\x01\x00\x30\x00\xf0\xff\xff\xff\xff\x01\x00\x00\xff\xff\xff\xff\x1f\x1c\x00\xfe\xff\xff\xff\x01\x00\x30\x00\xf0\xff\xff\xff\xff\x00\x00\x00\xfe\xff\xff\xff\x3f\x38\x00\xfe\xff\xff\xff\x01\x00\x30\x00\xf0\xff\xff\xff\x7f\x00\x00\x00\xfc\xff\xff\xff\x3f\x78\x00\xfe\xff\xff\xff\x01\x00\x30\x00\xf0\xff\xff\xff\x3f\x00\x00\x00\xf8\xff\xff\xff\x7f\xf0\x00\xfe\xff\xff\xff\x01\x00\x30\x00\xf0\xff\xff\xff\x1f\x00\x00\x00\xf0\xff\x
ff\xff\x7f\xc0\x07\xfe\xff\xff\xff\x01\x00\x18\x00\xf0\xff\xff\xff\x0f\x00\x00\x00\xe0\xff\xff\xff\x7f\x80\xff\xff\xff\xff\xff\x01\x00\x18\x00\xf0\xff\xff\xff\x0f\x00\x00\x00\xe0\xff\xff\xff\xff\x00\xfc\xff\xff\xff\xff\x01\x00\x1c\x00\xf0\xff\xff\xff\x07\x00\x00\x00\xc0\xff\xff\xff\xff\x00\xfe\xff\xff\xff\xff\x00\x00\x0c\x00\xe0\xff\xff\xff\x07\x00\x00\x00\xc0\xff\xff\xff\xff\x00\x1f\xfc\xff\xff\xff\x00\x00\x06\x00\xe0\xff\xff\xff\x03\x00\x00\x00\x80\xff\xff\xff\xff\xc0\x07\xfc\xff\xff\xff\x01\x00\x07\x00\xe0\xff\xff\xff\x03\x00\x00\x00\x80\xff\xff\xff\xff\xe0\x01\xf8\xff\xff\xff\x07\xc0\x03\x00\xc0\xff\xff\xff\x03\x00\x00\x00\x80\xff\xff\xff\xff\xe0\x00\xf8\xff\xff\xff\x1f\xf0\x00\x00\xc0\xff\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\xff\x70\x00\xf0\xff\xff\x3f\xff\x7f\x00\x00\x80\xff\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\xff\x78\x00\xe0\xff\xff\x1f\xfc\x0f\x00\x00\x80\xff\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\xff\x38\x00\xc0\xff\xff\x0f\x18\x00\x00\x00\x00\xff\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\x7f\x38\x00\x80\xff\xff\x07\x30\x00\x00\x00\x00\xfe\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\x7f\x1c\x00\x00\xff\xff\x03\x70\x00\x00\x00\x00\xfc\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\x7f\x1c\x00\x00\xfc\xff\x00\x60\x00\x00\x00\x00\xf8\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\x3f\x1c\x00\x00\xf0\x1f\x00\x60\x00\x00\x00\x00\xf0\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\x3f\x1c\x00\x00\x70\x00\x00\xc0\x00\x00\x00\x00\xc0\xff\xff\x01\x00\x00\x00\x00\xff\xff\xff\x1f\x1c\x00\x00\x70\x00\x00\xc0\x00\x00\x00\x00\x00\xff\xff\x03\x00\x00\x00\x80\xff\xff\xff\x1f\x1c\x00\x00\x70\x00\x00\xc0\x00\x00\x00\x00\x00\xf8\xff\x03\x00\x00\x00\x80\xff\xff\xff\x0f\x1c\x00\x00\x70\x00\x00\xc0\x00\x00\x00\x00\x00\xe0\xff\x03\x00\x00\x00\x80\xff\xff\xff\x07\x38\x00\x00\x68\x00\x00\xc0\x00\x00\x00\x00\x00\xf8\xff\x07\x00\x00\x00\xc0\xff\xff\xff\x03\x38\x00\x00\x68\x00\x00\xc0\x00\x00\x00\x00\x00\xfc\xff\x07\x00\x00\x00\xc0\xff\xff\xff\x01\x78\x00\x00\x6c\x00\x00\xc0\x00\x00\x00\x00\x00\xfe\xff\x
0f\x00\x00\x00\xe0\xff\xff\xff\x00\x70\x00\x00\xc4\x00\x00\x60\x00\x00\x00\x00\x00\xff\xff\x0f\x00\x00\x00\xe0\xff\xff\x3f\x00\xe0\x00\x00\xc2\x00\x00\x60\x00\x00\x00\x00\x80\xff\xff\x1f\x00\x00\x00\xf0\xff\xff\x0f\x00\xe0\x01\x00\xc3\x01\x00\x70\x00\x00\x00\x00\xc0\xff\xff\x3f\x00\x00\x00\xf8\xff\xff\x01\x00\xc0\x07\xc0\x81\x01\x00\x30\x00\x00\x00\x00\xc0\xff\xff\x7f\x00\x00\x00\xfc\xff\x03\x00\x00\x00\x1f\x70\x00\x03\x00\x18\x00\x00\x00\x00\xe0\xff\xff\xff\x00\x00\x00\xfe\xff\x07\x00\x00\x00\xfe\x3f\x00\x07\x00\x1c\x00\x00\x00\x00\xe0\xff\xff\xff\x01\x00\x00\xff\xff\x0f\x00\x00\x00\xf0\x07\x00\x1e\x00\x0f\x00\x00\x00\x00\xf0\xff\xff\xff\x07\x00\xc0\xff\xff\x0f\x00\x00\x00\x00\x00\x00\x78\xc0\x03\x00\x00\x00\x00\xf0\xff\xff\xff\x1f\x00\xf0\xff\xff\x1f\x00\xe0\x07\x00\x00\x00\xf0\xff\x01\x00\x00\x00\x00\xf0\xff\xff\xff\xff\x00\xfe\xff\xff\x1f\x00\xf8\x1f\x00\x00\x00\x80\x3f\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x3f\x00\xfe\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x3f\x00\xff\xff\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x3f\x00\x7f\xf0\x80\xff\x01\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x80\x1f\xc0\xe1\xff\x07\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x80\x07\x00\xf1\xe0\x0f\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x7f\xc0\x07\x00\x33\x80\x0f\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x7f\xc0\x03\x00\n\x00\x1e\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x7f\xc0\x03\x00\n\x00\x1e\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\xff\xff\xff\xff\xff\xff\x7f\xc0\x01\x00\x04\x00\x3c\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\x7f\xc0\x01\x00\x04\x00\x3c\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\x7f\xc0\x01\x00\x00\x00\x38\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x80\x01\x00\x00\x00\x38\x00\x00\x00\x00\x00\x
00\x00\xe0\xff\xff\xff\xff\xfe\xff\xff\xff\x3f\x80\x01\x00\x00\x00\x38\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\xff\xff\xfe\xff\xff\xff\x3f\x00\x01\x00\x00\x00\x38\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\xff\x7f\xfe\xff\xff\xff\x3f\x00\x03\x00\x02\x00\x18\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\xff\x7f\xfc\xff\xff\xff\x1f\x00\x02\x00\x06\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\xff\x3f\xfc\xff\xff\xff\x1f\xc0\x07\x00\x07\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\x1f\xf8\xff\xff\xff\x0f\xf0\x01\x00\x0f\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\xff\x0f\xf8\xff\xff\xff\x0f\x7c\x00\xc0\x0f\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\x07\xf0\xff\xff\xff\x07\x1e\x00\xf0\x3f\x80\x01\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\xff\x03\xe0\xff\xff\xff\x03\x1e\x00\xfc\xff\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\xff\x00\xc0\xff\xff\xff\x01\x0f\x00\xf8\xff\x03\x38\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\x3f\x00\x80\xff\xff\xff\x00\x0f\x00\xf8\xff\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x07\x00\x00\xff\xff\x7f\x80\x07\x00\xf0\xff\x00\xe0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x1f\x80\x07\x00\xf0\x7f\x00\xc0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x07\x80\x07\x00\xf0\x7f\x00\xc0\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\x00\x80\x07\x00\xf0\x3f\x00\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x07\x00\xf0\x3f\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x0f\x00\x38\x38\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x08\x20\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x00\xc0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7e\x00\x01\x00\x00\xc0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xc1\x01\x00\x00\x
e0\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xff\x00\x00\x00\xe0\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\x00\x00\x02\xf8\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x9f\x00\x00\x0e\xfe\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x00\xfe\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x00\x00\xf2\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x01\x00\xc3\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x03\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x03\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x0f\xe0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x3f\xf8\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
'tacho':
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xff\xff\xff\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x7f\x09\x10\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\x07\x09\x10\xe0\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xff\x00\r\x10\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x1f\x00\x05\x10\x00\xf9\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x05\x00\x00\xc9\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\x02\x00\x05\x00\x00\x05\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x3f\x02\x00\x05\x00\x80\x04\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x0f\x06\x00\x02\x00\x80\x02\xf0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x03\x04\x00\x02\x00\x80\x02\xc0\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xc1\x00\x00\x02\x00\x80\x01\x40\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xc0\x01\x00\x02\x00\x40\x01\x60\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x3f\x80\x01\x00\x02\x00\xc0\x00\x20\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x1f\x80\x03\x00\x00\x00\xc0\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x1f\x00\x07\x00\x00\x00\x40\x00\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x13\x00\x07\xcc\x03\x00\x00\x00\x00\xc0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x27\x00\n\x08\xfa\xc0\x02\x00\x00\x80\x03\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x2c\x00\x0e\x08\x8a\x80\x82\x0f\x00\x40\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\x68\x00\x14\xe8\x8b\x80\x82\x08\x00\x20\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x3f\x50\x00\x1c\x28\x88\x80\xa2\x08\x00\xd0\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x1f\x60\x00\x2c\x28\x88\x80\xbe\x08\x00\x38\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x1f\xc0\x00\x38\xe8\xfb\x80\xa0\x08\x00\x0c\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x0f\xc0\x00\x58\x00\x00\x80\xa0\x0f\x00\x03\x70\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x07\x80\x00\xf0\x00\x00\x00\x00\x00\x00\x01\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x03\x00\x00\xb0\x00\x00\x00\x00\x00\x00\x00\xc0\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x03\x00\x00\x70\x01\x00\x00\x00\x00\x00\x00\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x06\x00\x00\xe0\x01\x00\x00\x00\xec\x01\x00\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x0c\x60\x00\xa0\x02\x00\x00\x00\x28\xf8\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7e\x18\x40\xdf\xc7\x03\x00\x00\x00\x28\x88\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x40\x51\x44\x05\x00\x00\x00\xe8\x8b\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x40\x51\xc4\x0e\x00\x00\x00\x28\x8a\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x80\x3f\x00\x40\x51\x84\n\x00\x00\x00\x28\x8a\x00\x00\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x80\x1f\x00\x40\x51\x84\x14\x00\x00\x00\xe8\xfb\x00\x80\x19\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x1f\x00\x40\xdf\x07\x15\x00\x00\x00\x00\x00\x00\x00\x38\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x0f\x00\x00\x00\x00\x29\x00\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x0f\x00\x00\x00\x00\x2b\x00\x00\x00\x00\x00\x00\x00\x70\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x07\x00\x00\x00\x00\x52\x00\x00\x00\x00\x00\x00\x00\x60\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x07\x00\x00\x00\x00\x72\x00\x00\x00\x00\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x0f\x00\x00\x00\x00\xa4\x00\x00
\x00\x00\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x33\x00\x00\x00\x00\xe4\x01\x00\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x43\x00\x00\x00\x00\x4c\x01\x00\x00\x00\x00\x00\x00\xc0\x01\x00\x00\x00\x00\x00\x00\x00\xf8\x8f\x01\x00\x00\x00\x88\x02\x00\x00\x00\x00\x00\x00\xc0\x01\x00\x00\x00\x00\x00\x00\x00\xf8\x71\xc6\x07\x00\x00\x88\x02\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\x00\xf8\x81\x4f\xf4\x01\x00\x10\x05\x00\x00\x00\xc0\x3e\x00\x80\x03\x00\x00\x00\x00\x00\x00\x00\xfc\x01\x40\x14\x01\x00\x10\x05\x00\x00\x00\x80\xa2\x0f\xe0\x03\x00\x00\x00\x00\x00\x00\x00\xfc\x00\xc0\x17\x01\x00\x30\n\x00\x00\x00\x80\xa2\x08\x38\x03\x00\x00\x00\x00\x00\x00\x00\xfc\x00\x40\x14\x01\x00\x20\x1a\x00\x00\x00\x80\xbe\x08\x07\x03\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x40\x14\x01\x00\x20\x14\x00\x00\x00\x80\xa2\xe8\xff\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x00\xc0\xf7\x01\x00\x40\x2c\x00\x00\x00\x80\xa2\x08\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x40\x28\x00\x00\x00\x80\xbe\x0f\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\xc0\x50\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x80\x50\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x80\xa0\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x00\x00\x00\xa1\x03\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x7e\x0e\x00\x00\x00\x00\x00\xe1\x1f\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x72\x70\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x1e\xc0\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x0e\x80\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x06\x00\x01\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x07\x00\x03\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x
00\x00\x00\x00\x03\x00\x02\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x7f\x00\xe0\x01\x00\x00\x80\x03\x00\x02\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x20\xf8\x00\x00\x80\x03\x00\x02\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xff\x00\x20\x88\x00\x00\x80\x03\x00\x02\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\xff\x0f\xe0\x8b\x00\x00\x00\x03\x00\x02\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\xfe\xf0\x20\x8a\x00\x00\x00\x07\x00\x01\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\xc0\x27\x8a\x00\x00\x00\x07\x00\x01\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x3f\xe0\xfb\x00\x00\x00\x0e\x80\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x3e\xc0\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x7c\x70\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\xf0\x1f\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x01\x00\x00\x00\x00\x00\xc0\x07\x00\x00\x00\x00\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\xfe\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x07\x00\x00\x00\x00\x00\x00\x00\xfc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\x00\x00\x00\x00\x00\x00\x00\xfc\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x03\x00\x00\x00\x00\x00\x00\x00\xfc\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\xc0\x03\x00\x00\x00\x00\x00\x00\x00\xfc\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\n\x00\x00\xc0\x03\x00\x00\x00\x00\x00\x00\x00\xf8\x67\x00\x00\x00\x00\x00\x00\x00\x04\x00\n\x00\x00\xe0\x01\x00\x00\x00\x00\x00\x00\x00\xf8\x1f\x00\x02\x00\x00\x00\x00\x00\x24\x00\x09\x00\x00\xe0\x01\x00\x00\x00\x00\x00\x00\x00\xf8\x07\x00\x82\x0f\x00\x00\x00\x00\x94\n\x79\x00\x00\xe0\x01\x00\x00\x00\x00\x00\x00\x00\xf0\x0f\x00\x82\x08\x00\x00\x00\x00\x8c\x95\x48\x00\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x0f\x00\xa2\x08\x00\x00\x00\x00\x94\x94\x48\x00\x00\xf0\x00\x00\x00\x00\x00\x00\x00
\x00\xf0\x1f\x00\xbe\x08\x00\x00\x00\x00\xa4\x54\x48\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x1f\x00\xa0\x08\x00\x00\x00\x00\x00\x40\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x3f\x00\xa0\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x3e\x00\x00\x00\x00\x00\x00\x00\xc0\x3f\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\x07\x00\x00\x00\x00\x00\x00\xc0\xff\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\x0f\x00\x00\x00\x00\x00\x00\x80\x7f\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x3e\x00\x00\x00\x00\x00\x00\xf8\xff\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x70\x00\x00\x00\x00\x00\xc0\xff\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x0f\x00\xe0\x01\x00\x00\x00\x00\xe0\x1f\xfe\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x03\x00\x80\x01\x00\x00\x00\x00\xf8\x01\xf8\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x01\x00\x00\x03\x00\x00\x00\x00\x7c\x00\xe0\x07\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\xf0\x00\x00\x00\x07\x00\x00\x00\x00\xbe\x00\x90\x0f\x00\x00\xa0\x0f\x00\x00\x00\x00\x00\x00\xf0\x00\x3e\x00\x06\x00\x00\x00\x00\x9f\x00\x10\x1e\x00\x00\xa0\x08\x00\x00\x00\x00\x00\x00\x78\x80\xc1\x00\x0c\x00\x00\x00\x00\xcf\x00\x30\x1e\x00\x00\xbe\x08\x00\x00\x00\x00\x00\x00\x3c\x80\x80\x00\x1c\x00\x00\x00\x80\x87\x04\x12\x3c\x00\x00\x82\x08\x00\x00\x00\x00\x00\x00\x3c\x80\x80\x00\x1c\x00\x00\x00\xc0\xc3\x9b\x3d\x78\x20\x00\x82\x08\x00\x00\x00\x00\x00\x00\x3e\x80\xbe\x00\x18\x00\x00\x00\xc0\x83\x91\x18\xf8\x10\x00\xbe\x0f\x00\x00\x00\x00\x00\x00\x1f\x80\x80\x00\x18\x00\x00\x00\xc0\xc3\x64\x32\xf8\x09\x00\x00\x00\x00\x00\x00\x00\x00\x80\x1f\x80\xbe\x00\x18\x00\x00\x00\xc0\xc1\xf1\x38\xf0\x07\x00\x10\x00\x00\x00\x00\x00\x00\xc0\x1f\x80\x80\x00\x38\x00\x00\x00\xc0\x01\xfa\x05\xf0\x0f\x00\x18\x00\x00\x00\x00\x00\x00\xf0\x1f\x80\xbe\x00\x38\x00\x00\x00\xe0\x01\x64\x02\xf0\x1f\x00\x0c\x00\x00\x00\x00\x00\x00\xf8\x1f\x80\x80\x00\x38\x00\x00\x00\xe0\x01\x60\x00\xf0\x3f\x00\n\x00\x00\x00\x00\x00\x00\xfc\x1f\x80\xbe\x
00\x38\x00\x00\x00\xe0\x01\x60\x00\xf0\x7f\x80\x05\x00\x00\x00\x00\x00\x00\xfe\x1f\x80\x80\x00\x38\x00\x00\x00\xe0\x01\xfe\x07\xf0\xff\x41\x06\x00\x00\x00\x00\x00\x00\xff\x1c\x80\xbe\x00\x18\x00\x00\x00\xc0\x03\x62\x04\xf8\xff\x33\x02\x00\x00\x00\x00\x00\xc0\x7f\x3c\x80\x80\x00\x1c\x00\x00\x00\xc0\x03\x63\x0c\x88\xff\x1f\x03\x00\x00\x00\x00\x00\xf0\x1f\x3c\x80\xbe\x00\x1c\x00\x00\x00\xc0\x83\x03\x1c\x08\xff\x3f\x01\x00\x00\x00\x00\x00\xfc\x0f\x3c\x80\x80\x00\x1c\x00\x00\x00\xc0\x87\x03\x1c\x0c\xfe\xff\x01\x00\x00\x00\x00\x00\xff\x07\x7c\x80\xff\x00\x1e\x00\x00\x00\xc0\x8f\xff\x1f\x0e\xfc\xff\x03\x00\x00\x00\x00\xc0\xff\x03\xf8\x00\x00\x00\x0f\x00\x00\x00\x80\x9f\x07\x1e\x06\xf8\xff\x1f\x00\x00\x00\x00\xf8\xff\x01\xf0\x00\x00\x00\x07\x00\x00\x00\x00\x3f\x03\x0c\x03\xe0\xff\xff\x00\x00\x00\x00\xff\x7f\x00\xf0\x01\x00\x80\x07\x00\x00\x00\x00\x7f\x00\xc0\x03\x80\xff\xff\x07\x00\x00\xe0\xff\x1f\x00\xe0\x03\x00\xc0\x03\x00\x00\x00\x00\xfe\x01\xe0\x01\x00\xfe\xff\xff\x07\xe0\xff\xff\x07\x00\xc0\x0f\x00\xf0\x01\x00\x00\x00\x00\xfc\x0f\xfc\x00\x00\xf8\xff\xff\xff\xff\xff\xff\x01\x00\xc0\x1f\x00\xf8\x01\x00\x00\x00\x00\xf8\xff\x7f\x00\x00\xe0\xff\xff\xff\xff\xff\x7f\x00\x00\x00\xff\x00\x7f\x00\x00\x00\x00\x00\xe0\xff\x1f\x00\x00\x80\xff\xff\xff\xff\xff\x1f\x00\x00\x00\xfe\xff\x3f\x00\x00\x00\x00\x00\xc0\xff\x0f\x00\x00\x00\xfc\xff\xff\xff\xff\x03\x00\x00\x00\xf8\xff\x0f\x00\x00\x00\x00\x00\x00\xfc\x00\x00\x00\x00\xe0\xff\xff\xff\x7f\x00\x00\x00\x00\xf0\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xff\xff\x07\x00\x00\x00\x00\x00\x3e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
}
| 4,539.692308
| 11,788
| 0.748763
| 14,731
| 59,016
| 2.999728
| 0.009436
| 1.112177
| 1.442192
| 1.682591
| 0.952975
| 0.936953
| 0.915364
| 0.884406
| 0.850393
| 0.80864
| 0
| 0.398646
| 0.001254
| 59,016
| 12
| 11,789
| 4,918
| 0.351057
| 0
| 0
| 0
| 0
| 0.416667
| 0.998001
| 0.997323
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 17
|
fe874efea71053ee6af830c0569021af4192e452
| 128
|
py
|
Python
|
app/component/sink/__init__.py
|
melphi/kafkaform
|
391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6
|
[
"MIT"
] | null | null | null |
app/component/sink/__init__.py
|
melphi/kafkaform
|
391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6
|
[
"MIT"
] | null | null | null |
app/component/sink/__init__.py
|
melphi/kafkaform
|
391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6
|
[
"MIT"
] | null | null | null |
from app.component.sink.parser import *
from app.component.sink.resolver import *
from app.component.sink.transitioner import *
| 32
| 45
| 0.8125
| 18
| 128
| 5.777778
| 0.444444
| 0.201923
| 0.461538
| 0.576923
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 128
| 3
| 46
| 42.666667
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
229590ba807f258a2c04f2a6c09b50506523cb22
| 30,785
|
py
|
Python
|
ding/model/template/q_learning.py
|
konnase/DI-engine
|
f803499cad191e9277b10e194132d74757bcfc8e
|
[
"Apache-2.0"
] | null | null | null |
ding/model/template/q_learning.py
|
konnase/DI-engine
|
f803499cad191e9277b10e194132d74757bcfc8e
|
[
"Apache-2.0"
] | null | null | null |
ding/model/template/q_learning.py
|
konnase/DI-engine
|
f803499cad191e9277b10e194132d74757bcfc8e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, Optional, Dict, Callable, List
import torch
import torch.nn as nn
from ding.torch_utils import get_lstm
from ding.utils import MODEL_REGISTRY, SequenceType, squeeze
from ..common import FCEncoder, ConvEncoder, DiscreteHead, DuelingHead, MultiHead, RainbowHead, \
QuantileHead, QRDQNHead, DistributionHead
@MODEL_REGISTRY.register('dqn')
class DQN(nn.Module):

    def __init__(
            self,
            obs_shape: Union[int, SequenceType],
            action_shape: Union[int, SequenceType],
            encoder_hidden_size_list: SequenceType = [128, 128, 64],
            dueling: bool = True,
            head_hidden_size: Optional[int] = None,
            head_layer_num: int = 1,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None
    ) -> None:
        """
        Overview:
            Build the DQN model as an observation encoder followed by a Q-value head.
        Arguments:
            - obs_shape (:obj:`Union[int, SequenceType]`): Observation space shape, e.g. 8 or [4, 84, 84].
            - action_shape (:obj:`Union[int, SequenceType]`): Action space shape, e.g. 6 or [2, 3, 3].
            - encoder_hidden_size_list (:obj:`SequenceType`): Hidden sizes forwarded to the encoder; \
                its last element must equal ``head_hidden_size``.
            - dueling (:obj:`bool`): If ``True`` use ``DuelingHead``, otherwise ``DiscreteHead``.
            - head_hidden_size (:obj:`Optional[int]`): Hidden size of the head network; defaults to \
                the last encoder hidden size when ``None``.
            - head_layer_num (:obj:`int`): Number of layers in the head network computing the Q output.
            - activation (:obj:`Optional[nn.Module]`): Activation used throughout the networks; \
                ``nn.ReLU()`` by default.
            - norm_type (:obj:`Optional[str]`): Normalization type, see ``ding.torch_utils.fc_block`` \
                for the accepted values.
        """
        super(DQN, self).__init__()
        # Normalize wrapped shapes for compatibility: 1, (1, ), [4, 32, 32] all accepted.
        obs_shape, action_shape = squeeze(obs_shape), squeeze(action_shape)
        if head_hidden_size is None:
            head_hidden_size = encoder_hidden_size_list[-1]
        # Choose the encoder from the observation rank: 1-D -> fully-connected, 3-D -> convolutional.
        if isinstance(obs_shape, int) or len(obs_shape) == 1:
            self.encoder = FCEncoder(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        elif len(obs_shape) == 3:
            self.encoder = ConvEncoder(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        else:
            raise RuntimeError(
                "not support obs_shape for pre-defined encoder: {}, please customize your own DQN".format(obs_shape)
            )
        # Choose the Q-value head; a non-int action shape means several independent action dimensions.
        head_cls = DuelingHead if dueling else DiscreteHead
        if isinstance(action_shape, int):
            self.head = head_cls(
                head_hidden_size, action_shape, head_layer_num, activation=activation, norm_type=norm_type
            )
        else:
            self.head = MultiHead(
                head_cls,
                head_hidden_size,
                action_shape,
                layer_num=head_layer_num,
                activation=activation,
                norm_type=norm_type
            )

    def forward(self, x: torch.Tensor) -> Dict:
        r"""
        Overview:
            Run the DQN forward pass: encode the observation, then predict Q-values.
        Arguments:
            - x (:obj:`torch.Tensor`): Observation inputs.
        Returns:
            - outputs (:obj:`Dict`): Forward outputs containing the Q-value prediction.
        ReturnsKeys:
            - logit (:obj:`torch.Tensor`): Discrete Q-value output for each action dimension.
        Shapes:
            - x (:obj:`torch.Tensor`): :math:`(B, N)`, where B is batch size and N is ``obs_shape``
            - logit (:obj:`torch.FloatTensor`): :math:`(B, M)`, where B is batch size and M is ``action_shape``
        Examples:
            >>> model = DQN(32, 6)  # arguments: 'obs_shape' and 'action_shape'
            >>> inputs = torch.randn(4, 32)
            >>> outputs = model(inputs)
            >>> assert isinstance(outputs, dict) and outputs['logit'].shape == torch.Size([4, 6])
        """
        encoded = self.encoder(x)
        return self.head(encoded)
@MODEL_REGISTRY.register('c51dqn')
class C51DQN(nn.Module):

    def __init__(
            self,
            obs_shape: Union[int, SequenceType],
            action_shape: Union[int, SequenceType],
            encoder_hidden_size_list: SequenceType = [128, 128, 64],
            head_hidden_size: Optional[int] = None,
            head_layer_num: int = 1,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None,
            v_min: Optional[float] = -10,
            v_max: Optional[float] = 10,
            n_atom: Optional[int] = 51,
    ) -> None:
        r"""
        Overview:
            Init the C51 Model according to input arguments.
        Arguments:
            - obs_shape (:obj:`Union[int, SequenceType]`): Observation's space.
            - action_shape (:obj:`Union[int, SequenceType]`): Action's space.
            - encoder_hidden_size_list (:obj:`SequenceType`): Collection of ``hidden_size`` to pass to ``Encoder``.
            - head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to ``Head``; defaults to the \
                last element of ``encoder_hidden_size_list``.
            - head_layer_num (:obj:`int`): The num of layers used in the network to compute Q value output.
            - activation (:obj:`Optional[nn.Module]`): The type of activation function to use in ``MLP`` after \
                ``layer_fn``, if ``None`` then default set to ``nn.ReLU()``.
            - norm_type (:obj:`Optional[str]`): The type of normalization to use, see \
                ``ding.torch_utils.fc_block`` for more details.
            - v_min (:obj:`Optional[float]`): Minimum value of the categorical distribution support.
            - v_max (:obj:`Optional[float]`): Maximum value of the categorical distribution support.
            - n_atom (:obj:`Optional[int]`): Number of atoms in the prediction distribution.
        """
        super(C51DQN, self).__init__()
        # For compatibility: 1, (1, ), [4, 32, 32]
        obs_shape, action_shape = squeeze(obs_shape), squeeze(action_shape)
        if head_hidden_size is None:
            head_hidden_size = encoder_hidden_size_list[-1]
        # FC Encoder for scalar/1D observations
        if isinstance(obs_shape, int) or len(obs_shape) == 1:
            self.encoder = FCEncoder(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        # Conv Encoder for 3D (image) observations
        elif len(obs_shape) == 3:
            self.encoder = ConvEncoder(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        else:
            raise RuntimeError(
                "not support obs_shape for pre-defined encoder: {}, please customize your own C51DQN".format(obs_shape)
            )
        # Head Type: DistributionHead, wrapped by MultiHead for multi-discrete action spaces
        multi_head = not isinstance(action_shape, int)
        if multi_head:
            self.head = MultiHead(
                DistributionHead,
                head_hidden_size,
                action_shape,
                layer_num=head_layer_num,
                activation=activation,
                norm_type=norm_type,
                n_atom=n_atom,
                v_min=v_min,
                v_max=v_max,
            )
        else:
            self.head = DistributionHead(
                head_hidden_size,
                action_shape,
                head_layer_num,
                activation=activation,
                norm_type=norm_type,
                n_atom=n_atom,
                v_min=v_min,
                v_max=v_max,
            )

    def forward(self, x: torch.Tensor) -> Dict:
        r"""
        Overview:
            Use observation tensor to predict C51DQN's output: run the encoder then the
            distributional head.
        Arguments:
            - x (:obj:`torch.Tensor`): Observation input of shape ``(B, N)``.
        Returns:
            - outputs (:obj:`Dict`): The result prediction dictionary.
        ReturnsKeys:
            - logit (:obj:`torch.Tensor`): Discrete Q-value output of each action dimension.
            - distribution (:obj:`torch.Tensor`): Distribution tensor of size ``(B, M, n_atom)``.
        Shapes:
            - x (:obj:`torch.Tensor`): :math:`(B, N)`, where B is batch size and N is head_hidden_size.
            - logit (:obj:`torch.FloatTensor`): :math:`(B, M)`, where M is action_shape.
            - distribution (:obj:`torch.FloatTensor`): :math:`(B, M, P)`, where P is n_atom.
        Examples:
            >>> model = C51DQN(128, 64)  # arguments: 'obs_shape' and 'action_shape'
            >>> inputs = torch.randn(4, 128)
            >>> outputs = model(inputs)
            >>> assert isinstance(outputs, dict)
            >>> # default head_hidden_size: int = 64,
            >>> assert outputs['logit'].shape == torch.Size([4, 64])
            >>> # default n_atom: int = 51
            >>> assert outputs['distribution'].shape == torch.Size([4, 64, 51])
        """
        x = self.encoder(x)
        x = self.head(x)
        return x
@MODEL_REGISTRY.register('qrdqn')
class QRDQN(nn.Module):

    def __init__(
            self,
            obs_shape: Union[int, SequenceType],
            action_shape: Union[int, SequenceType],
            encoder_hidden_size_list: SequenceType = [128, 128, 64],
            head_hidden_size: Optional[int] = None,
            head_layer_num: int = 1,
            num_quantiles: int = 32,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None,
    ) -> None:
        r"""
        Overview:
            Init the QRDQN (Quantile Regression DQN) model according to input arguments.
        Arguments:
            - obs_shape (:obj:`Union[int, SequenceType]`): Observation's space.
            - action_shape (:obj:`Union[int, SequenceType]`): Action's space.
            - encoder_hidden_size_list (:obj:`SequenceType`): Collection of ``hidden_size`` to pass to ``Encoder``.
            - head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to ``Head``; defaults to the \
                last element of ``encoder_hidden_size_list``.
            - head_layer_num (:obj:`int`): The num of layers used in the network to compute Q value output.
            - num_quantiles (:obj:`int`): Number of quantiles in the prediction distribution.
            - activation (:obj:`Optional[nn.Module]`): The type of activation function, ``nn.ReLU()`` by default.
            - norm_type (:obj:`Optional[str]`): The type of normalization, see ``ding.torch_utils.fc_block`` \
                for more details.
        """
        super(QRDQN, self).__init__()
        # Accept 1, (1, ) or [4, 32, 32] style shapes interchangeably.
        obs_shape, action_shape = squeeze(obs_shape), squeeze(action_shape)
        if head_hidden_size is None:
            head_hidden_size = encoder_hidden_size_list[-1]
        # Select encoder by observation rank: scalar/1D -> FC, 3D (image) -> Conv.
        if isinstance(obs_shape, int) or len(obs_shape) == 1:
            encoder_cls = FCEncoder
        elif len(obs_shape) == 3:
            encoder_cls = ConvEncoder
        else:
            raise RuntimeError(
                "not support obs_shape for pre-defined encoder: {}, please customize your own QRDQN".format(obs_shape)
            )
        self.encoder = encoder_cls(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        # Quantile-regression head; wrap with MultiHead for multi-discrete action spaces.
        head_kwargs = dict(num_quantiles=num_quantiles, activation=activation, norm_type=norm_type)
        if isinstance(action_shape, int):
            self.head = QRDQNHead(head_hidden_size, action_shape, head_layer_num, **head_kwargs)
        else:
            self.head = MultiHead(QRDQNHead, head_hidden_size, action_shape, layer_num=head_layer_num, **head_kwargs)

    def forward(self, x: torch.Tensor) -> Dict:
        r"""
        Overview:
            Use observation tensor to predict QRDQN's output: encoder followed by the
            quantile-regression head.
        Arguments:
            - x (:obj:`torch.Tensor`): Observation input of shape ``(B, N)``.
        Returns:
            - outputs (:obj:`Dict`): The result prediction dictionary.
        ReturnsKeys:
            - logit (:obj:`torch.Tensor`): Discrete Q-value output of each action dimension.
            - q (:obj:`torch.Tensor`): Q-value tensor of size ``(B, M, num_quantiles)``.
            - tau (:obj:`torch.Tensor`): Quantile fraction tensor of size ``(B, num_quantiles, 1)``.
        Shapes:
            - x (:obj:`torch.Tensor`): :math:`(B, N)`, where B is batch size and N is head_hidden_size.
            - logit (:obj:`torch.FloatTensor`): :math:`(B, M)`, where M is action_shape.
        Examples:
            >>> model = QRDQN(64, 64)
            >>> inputs = torch.randn(4, 64)
            >>> outputs = model(inputs)
            >>> assert isinstance(outputs, dict)
            >>> assert outputs['logit'].shape == torch.Size([4, 64])
            >>> # default num_quantiles: int = 32
            >>> assert outputs['q'].shape == torch.Size([4, 64, 32])
            >>> assert outputs['tau'].shape == torch.Size([4, 32, 1])
        """
        return self.head(self.encoder(x))
@MODEL_REGISTRY.register('iqn')
class IQN(nn.Module):

    def __init__(
            self,
            obs_shape: Union[int, SequenceType],
            action_shape: Union[int, SequenceType],
            encoder_hidden_size_list: SequenceType = [128, 128, 64],
            head_hidden_size: Optional[int] = None,
            head_layer_num: int = 1,
            num_quantiles: int = 32,
            quantile_embedding_size: int = 128,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None
    ) -> None:
        r"""
        Overview:
            Init the IQN (Implicit Quantile Network) model according to input arguments.
        Arguments:
            - obs_shape (:obj:`Union[int, SequenceType]`): Observation space shape.
            - action_shape (:obj:`Union[int, SequenceType]`): Action space shape.
            - encoder_hidden_size_list (:obj:`SequenceType`): Collection of ``hidden_size`` to pass to ``Encoder``.
            - head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to ``Head``; defaults to the \
                last element of ``encoder_hidden_size_list``.
            - head_layer_num (:obj:`int`): The num of layers used in the network to compute Q value output.
            - num_quantiles (:obj:`int`): Number of quantiles in the prediction distribution.
            - quantile_embedding_size (:obj:`int`): Embedding size used for the sampled quantile fractions.
            - activation (:obj:`Optional[nn.Module]`): The type of activation function, ``nn.ReLU()`` by default.
            - norm_type (:obj:`Optional[str]`): The type of normalization, see ``ding.torch_utils.fc_block`` \
                for more details.
        """
        super(IQN, self).__init__()
        # Accept 1, (1, ) or [4, 32, 32] style shapes interchangeably.
        obs_shape, action_shape = squeeze(obs_shape), squeeze(action_shape)
        if head_hidden_size is None:
            head_hidden_size = encoder_hidden_size_list[-1]
        # Select encoder by observation rank: scalar/1D -> FC, 3D (image) -> Conv.
        if isinstance(obs_shape, int) or len(obs_shape) == 1:
            encoder_cls = FCEncoder
        elif len(obs_shape) == 3:
            encoder_cls = ConvEncoder
        else:
            raise RuntimeError(
                "not support obs_shape for pre-defined encoder: {}, please customize your own IQN".format(obs_shape)
            )
        self.encoder = encoder_cls(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        # Implicit-quantile head; wrap with MultiHead for multi-discrete action spaces.
        head_kwargs = dict(
            num_quantiles=num_quantiles,
            quantile_embedding_size=quantile_embedding_size,
            activation=activation,
            norm_type=norm_type,
        )
        if isinstance(action_shape, int):
            self.head = QuantileHead(head_hidden_size, action_shape, head_layer_num, **head_kwargs)
        else:
            self.head = MultiHead(QuantileHead, head_hidden_size, action_shape, layer_num=head_layer_num, **head_kwargs)

    def forward(self, x: torch.Tensor) -> Dict:
        r"""
        Overview:
            Use observation tensor to predict IQN's output: encoder followed by the
            implicit-quantile head.
        Arguments:
            - x (:obj:`torch.Tensor`): Observation input of shape ``(B, N)``.
        Returns:
            - outputs (:obj:`Dict`): The result prediction dictionary.
        ReturnsKeys:
            - logit (:obj:`torch.Tensor`): Discrete Q-value output of each action dimension.
            - q (:obj:`torch.Tensor`): Q-value tensor of size ``(num_quantiles, B, M)``.
            - quantiles (:obj:`torch.Tensor`): Sampled quantile fractions.
        Shapes:
            - x (:obj:`torch.Tensor`): :math:`(B, N)`, where B is batch size and N is head_hidden_size.
            - logit (:obj:`torch.FloatTensor`): :math:`(B, M)`, where M is action_shape.
        Examples:
            >>> model = IQN(64, 64)  # arguments: 'obs_shape' and 'action_shape'
            >>> inputs = torch.randn(4, 64)
            >>> outputs = model(inputs)
            >>> assert isinstance(outputs, dict)
            >>> assert outputs['logit'].shape == torch.Size([4, 64])
            >>> # default num_quantiles: int = 32
            >>> assert outputs['q'].shape == torch.Size([32, 4, 64])
            >>> # default quantile_embedding_size: int = 128
            >>> assert outputs['quantiles'].shape == torch.Size([128, 1])
        """
        return self.head(self.encoder(x))
@MODEL_REGISTRY.register('rainbowdqn')
class RainbowDQN(nn.Module):
    """
    Overview:
        RainbowDQN network (C51 + Dueling + Noisy Block)
    .. note::
        RainbowDQN contains dueling architecture by default
    """

    def __init__(
            self,
            obs_shape: Union[int, SequenceType],
            action_shape: Union[int, SequenceType],
            encoder_hidden_size_list: SequenceType = [128, 128, 64],
            head_hidden_size: Optional[int] = None,
            head_layer_num: int = 1,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None,
            v_min: Optional[float] = -10,
            v_max: Optional[float] = 10,
            n_atom: Optional[int] = 51,
    ) -> None:
        """
        Overview:
            Init the Rainbow Model according to arguments.
        Arguments:
            - obs_shape (:obj:`Union[int, SequenceType]`): Observation space shape.
            - action_shape (:obj:`Union[int, SequenceType]`): Action space shape.
            - encoder_hidden_size_list (:obj:`SequenceType`): Collection of ``hidden_size`` to pass to ``Encoder``.
            - head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to ``Head``; defaults to the \
                last element of ``encoder_hidden_size_list``.
            - head_layer_num (:obj:`int`): The num of layers used in the network to compute Q value output.
            - activation (:obj:`Optional[nn.Module]`): The type of activation function, ``nn.ReLU()`` by default.
            - norm_type (:obj:`Optional[str]`): The type of normalization, see ``ding.torch_utils.fc_block`` \
                for more details.
            - v_min (:obj:`Optional[float]`): Minimum value of the categorical distribution support.
            - v_max (:obj:`Optional[float]`): Maximum value of the categorical distribution support.
            - n_atom (:obj:`Optional[int]`): Number of atoms in the prediction distribution.
        """
        super(RainbowDQN, self).__init__()
        # Accept 1, (1, ) or [4, 32, 32] style shapes interchangeably.
        obs_shape, action_shape = squeeze(obs_shape), squeeze(action_shape)
        if head_hidden_size is None:
            head_hidden_size = encoder_hidden_size_list[-1]
        # Select encoder by observation rank: scalar/1D -> FC, 3D (image) -> Conv.
        if isinstance(obs_shape, int) or len(obs_shape) == 1:
            encoder_cls = FCEncoder
        elif len(obs_shape) == 3:
            encoder_cls = ConvEncoder
        else:
            raise RuntimeError(
                "not support obs_shape for pre-defined encoder: {}, please customize your own RainbowDQN".
                format(obs_shape)
            )
        self.encoder = encoder_cls(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        # Rainbow head (distributional + dueling + noisy); MultiHead for multi-discrete actions.
        head_kwargs = dict(activation=activation, norm_type=norm_type, n_atom=n_atom, v_min=v_min, v_max=v_max)
        if isinstance(action_shape, int):
            self.head = RainbowHead(head_hidden_size, action_shape, head_layer_num, **head_kwargs)
        else:
            self.head = MultiHead(RainbowHead, head_hidden_size, action_shape, layer_num=head_layer_num, **head_kwargs)

    def forward(self, x: torch.Tensor) -> Dict:
        r"""
        Overview:
            Use observation tensor to predict Rainbow output: encoder followed by the
            Rainbow head.
        Arguments:
            - x (:obj:`torch.Tensor`): Observation input of shape ``(B, N)``.
        Returns:
            - outputs (:obj:`Dict`): The result prediction dictionary.
        ReturnsKeys:
            - logit (:obj:`torch.Tensor`): Discrete Q-value output of each action dimension.
            - distribution (:obj:`torch.Tensor`): Distribution tensor of size ``(B, M, n_atom)``.
        Shapes:
            - x (:obj:`torch.Tensor`): :math:`(B, N)`, where B is batch size and N is head_hidden_size.
            - logit (:obj:`torch.FloatTensor`): :math:`(B, M)`, where M is action_shape.
            - distribution (:obj:`torch.FloatTensor`): :math:`(B, M, P)`, where P is n_atom.
        Examples:
            >>> model = RainbowDQN(64, 64)  # arguments: 'obs_shape' and 'action_shape'
            >>> inputs = torch.randn(4, 64)
            >>> outputs = model(inputs)
            >>> assert isinstance(outputs, dict)
            >>> assert outputs['logit'].shape == torch.Size([4, 64])
            >>> # default n_atom: int = 51
            >>> assert outputs['distribution'].shape == torch.Size([4, 64, 51])
        """
        return self.head(self.encoder(x))
def parallel_wrapper(forward_fn: Callable) -> Callable:
    r"""
    Overview:
        Fold the time dimension T into the batch dimension B before calling ``forward_fn``,
        then restore the leading ``(T, B)`` dims on every tensor of the result; in other
        words, treat different timestep data as different trajectories in a batch.
        Lists and dicts in the output are handled recursively.
    Arguments:
        - forward_fn (:obj:`Callable`): Normal ``nn.Module`` 's forward function.
    Returns:
        - wrapper (:obj:`Callable`): Wrapped function.
    """

    def wrapper(x: torch.Tensor) -> Union[torch.Tensor, List[torch.Tensor]]:
        T, B = x.shape[:2]

        def restore(out):
            # Recursively reshape every tensor in the output back to (T, B, ...).
            if isinstance(out, list):
                return [restore(item) for item in out]
            if isinstance(out, dict):
                return {key: restore(val) for key, val in out.items()}
            return out.reshape(T, B, *out.shape[1:])

        merged = x.reshape(T * B, *x.shape[2:])
        return restore(forward_fn(merged))

    return wrapper
@MODEL_REGISTRY.register('drqn')
class DRQN(nn.Module):
    """
    Overview:
        DQN + RNN = DRQN. DQN-style encoder, a recurrent cell unrolled over timesteps,
        then a discrete (optionally dueling) Q-value head.
    """

    def __init__(
            self,
            obs_shape: Union[int, SequenceType],
            action_shape: Union[int, SequenceType],
            encoder_hidden_size_list: SequenceType = [128, 128, 64],
            dueling: bool = True,
            head_hidden_size: Optional[int] = None,
            head_layer_num: int = 1,
            lstm_type: Optional[str] = 'normal',
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None
    ) -> None:
        r"""
        Overview:
            Init the DRQN Model according to arguments.
        Arguments:
            - obs_shape (:obj:`Union[int, SequenceType]`): Observation's space.
            - action_shape (:obj:`Union[int, SequenceType]`): Action's space.
            - encoder_hidden_size_list (:obj:`SequenceType`): Collection of ``hidden_size`` to pass to ``Encoder``.
            - dueling (:obj:`bool`): Whether to use ``DuelingHead`` (default) or ``DiscreteHead``.
            - head_hidden_size (:obj:`Optional[int]`): The ``hidden_size`` to pass to ``Head``; defaults to the \
                last element of ``encoder_hidden_size_list``.
            - head_layer_num (:obj:`int`): The num of layers used in the head to compute Q value output.
            - lstm_type (:obj:`Optional[str]`): Version of rnn cell, now support ['normal', 'pytorch', 'hpc', 'gru'].
            - activation (:obj:`Optional[nn.Module]`): The type of activation function, ``nn.ReLU()`` by default.
            - norm_type (:obj:`Optional[str]`): The type of normalization, see ``ding.torch_utils.fc_block`` \
                for more details.
        """
        super(DRQN, self).__init__()
        # For compatibility: 1, (1, ), [4, 32, 32]
        obs_shape, action_shape = squeeze(obs_shape), squeeze(action_shape)
        if head_hidden_size is None:
            head_hidden_size = encoder_hidden_size_list[-1]
        # FC Encoder for scalar/1D observations
        if isinstance(obs_shape, int) or len(obs_shape) == 1:
            self.encoder = FCEncoder(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        # Conv Encoder for 3D (image) observations
        elif len(obs_shape) == 3:
            self.encoder = ConvEncoder(obs_shape, encoder_hidden_size_list, activation=activation, norm_type=norm_type)
        else:
            raise RuntimeError(
                "not support obs_shape for pre-defined encoder: {}, please customize your own DRQN".format(obs_shape)
            )
        # LSTM Type: recurrent cell between encoder and head; input and hidden size are both head_hidden_size
        self.rnn = get_lstm(lstm_type, input_size=head_hidden_size, hidden_size=head_hidden_size)
        # Head Type
        if dueling:
            head_cls = DuelingHead
        else:
            head_cls = DiscreteHead
        multi_head = not isinstance(action_shape, int)
        if multi_head:
            self.head = MultiHead(
                head_cls,
                head_hidden_size,
                action_shape,
                layer_num=head_layer_num,
                activation=activation,
                norm_type=norm_type
            )
        else:
            self.head = head_cls(
                head_hidden_size, action_shape, head_layer_num, activation=activation, norm_type=norm_type
            )

    def forward(self, inputs: Dict, inference: bool = False) -> Dict:
        r"""
        Overview:
            Use observation tensor(s) to predict DRQN output. In inference mode the input is a
            single step of observations; otherwise the input is a whole trajectory with a
            leading time dimension T.
        Arguments:
            - inputs (:obj:`Dict`): Input dict with keys ``obs`` and ``prev_state``.
            - inference (:obj:`bool`): If ``True``, process one timestep; otherwise unroll over T timesteps.
        ArgumentsKeys:
            - obs (:obj:`torch.Tensor`): Observation; ``(B, N)`` for inference, ``(T, B, N)`` or \
                ``(T, B, C, H, W)`` for training.
            - prev_state (:obj:`list`): Previous recurrent state.
        Returns:
            - outputs (:obj:`Dict`): Result prediction dictionary.
        ReturnsKeys:
            - logit (:obj:`torch.Tensor`): Discrete Q-value output.
            - next_state (:obj:`list`): Recurrent state after the last processed timestep.
            - hidden_state (:obj:`torch.Tensor`): (training mode only) stacked per-timestep hidden states.
        Examples:
            >>> # Init input's Keys:
            >>> prev_state = [[torch.randn(1, 1, 64) for __ in range(2)] for _ in range(4)]  # B=4
            >>> obs = torch.randn(4, 64)
            >>> model = DRQN(64, 64)  # arguments: 'obs_shape' and 'action_shape'
            >>> outputs = model({'obs': obs, 'prev_state': prev_state}, inference=True)
            >>> # Check outputs's Keys
            >>> assert isinstance(outputs, dict)
            >>> assert outputs['logit'].shape == (4, 64)
            >>> assert len(outputs['next_state']) == 4
            >>> assert all([len(t) == 2 for t in outputs['next_state']])
            >>> assert all([t[0].shape == (1, 1, 64) for t in outputs['next_state']])
        """
        x, prev_state = inputs['obs'], inputs['prev_state']
        if inference:
            # Single-step path: encode, add a fake time dim for the rnn, strip it afterwards.
            x = self.encoder(x)
            x = x.unsqueeze(0)
            x, next_state = self.rnn(x, prev_state)
            x = x.squeeze(0)
            x = self.head(x)
            x['next_state'] = next_state
            return x
        else:
            # Trajectory path: obs must be (T, B, N) or (T, B, C, H, W).
            assert len(x.shape) in [3, 5], x.shape
            x = parallel_wrapper(self.encoder)(x)
            lstm_embedding = []
            # TODO(nyz) how to deal with hidden_size key-value
            hidden_state_list = []
            for t in range(x.shape[0]):  # T timesteps
                output, prev_state = self.rnn(x[t:t + 1], prev_state)
                lstm_embedding.append(output)
                # NOTE(review): prev_state appears to be a per-sample list of (h, c) pairs; the
                # zip groups all h tensors at index 0 — confirm against get_lstm's state layout.
                hidden_state = list(zip(*prev_state))
                hidden_state_list.append(torch.cat(hidden_state[0], dim=1))
            x = torch.cat(lstm_embedding, 0)
            x = parallel_wrapper(self.head)(x)
            x['next_state'] = prev_state
            # Stack per-timestep hidden states; dim=-3 presumably the time axis — TODO confirm.
            x['hidden_state'] = torch.cat(hidden_state_list, dim=-3)
            return x
class GeneralQNetwork(nn.Module):
    """Placeholder for a general Q-network; intentionally empty for now."""
    pass
| 43.853276
| 119
| 0.564723
| 3,625
| 30,785
| 4.616276
| 0.070069
| 0.054978
| 0.037648
| 0.037648
| 0.812418
| 0.789052
| 0.773156
| 0.763117
| 0.747879
| 0.741723
| 0
| 0.01396
| 0.318239
| 30,785
| 701
| 120
| 43.915835
| 0.783352
| 0.448173
| 0
| 0.704918
| 0
| 0
| 0.038562
| 0
| 0
| 0
| 0
| 0.001427
| 0.002732
| 1
| 0.040984
| false
| 0.002732
| 0.016393
| 0
| 0.103825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22b4a3d1659296fdfa53a5621c793b2417067bcc
| 11,228
|
py
|
Python
|
django_recurly/tests/models.py
|
sprintly/django-recurly
|
9df1a860f8e25619df5cb4bc012d83804bd62749
|
[
"BSD-3-Clause"
] | 1
|
2016-06-26T23:04:29.000Z
|
2016-06-26T23:04:29.000Z
|
django_recurly/tests/models.py
|
sprintly/django-recurly
|
9df1a860f8e25619df5cb4bc012d83804bd62749
|
[
"BSD-3-Clause"
] | null | null | null |
django_recurly/tests/models.py
|
sprintly/django-recurly
|
9df1a860f8e25619df5cb4bc012d83804bd62749
|
[
"BSD-3-Clause"
] | 1
|
2017-09-29T09:21:42.000Z
|
2017-09-29T09:21:42.000Z
|
import unittest
import datetime
from django.test import TestCase
from django_recurly.tests.base import BaseTest
from django_recurly.models import *
from mock import patch, Mock
class AccountModelTest(BaseTest):
    """Tests for Account.handle_notification driven by recorded Recurly push notifications."""

    def test_handle_notification_creating(self):
        """A 'new_subscription' push creates exactly one Account and one Subscription."""
        data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
        account, subscription = Account.handle_notification(data)
        self.assertEqual(Account.objects.count(), 1)
        self.assertEqual(Subscription.objects.count(), 1)
        # Let's be thorough here
        self.assertEqual(account.user.username, "verena")
        self.assertEqual(account.first_name, "Verena")
        self.assertEqual(account.company_name, "Company, Inc.")
        self.assertEqual(account.email, "verena@test.com")
        self.assertEqual(account.account_code, "verena@test.com")
        subscription = account.get_current_subscription()
        self.assertEqual(subscription.plan_code, "bronze")
        self.assertEqual(subscription.plan_version, 2)
        self.assertEqual(subscription.state, "active")
        self.assertEqual(subscription.quantity, 2)
        self.assertEqual(subscription.total_amount_in_cents, 2000)
        self.assertEqual(subscription.activated_at, datetime.datetime(2009, 11, 22, 21, 10, 38))  # Phew, its in UTC now :)
        self.assertSignal("account_opened")
        self.assertNoSignal("account_closed")

    def test_handle_notification_updating_canceled(self):
        """A cancel push updates the account but keeps it open (no close signal yet)."""
        data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
        Account.handle_notification(data)
        self.resetSignals()
        data = self.parse_xml(self.push_notifications["canceled_subscription_notification-ok"])
        account, subscription = Account.handle_notification(data)
        # Let's be thorough here
        self.assertEqual(account.user.username, "verena")
        self.assertEqual(account.first_name, "Jane")
        self.assertEqual(account.last_name, "Doe")
        self.assertEqual(account.company_name, None)
        self.assertEqual(account.email, "janedoe@gmail.com")
        self.assertEqual(account.account_code, "verena@test.com")
        subscription = account.get_current_subscription()
        self.assertEqual(subscription.plan_code, "1dpt")
        self.assertEqual(subscription.plan_version, 2)
        self.assertEqual(subscription.state, "canceled")
        self.assertEqual(subscription.quantity, 1)
        self.assertEqual(subscription.total_amount_in_cents, 200)
        # Account was 'canceled', but is still technically open until is expires
        self.assertNoSignal("account_opened")
        self.assertNoSignal("account_closed")

    def test_handle_notification_updating_expired(self):
        """An expire push closes the account and leaves no current subscription."""
        data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
        Account.handle_notification(data)
        self.resetSignals()
        data = self.parse_xml(self.push_notifications["expired_subscription_notification-ok"])
        account, subscription = Account.handle_notification(data)
        # Let's be thorough here
        self.assertEqual(account.user.username, "verena")
        self.assertEqual(account.first_name, "Jane")
        self.assertEqual(account.last_name, "Doe")
        self.assertEqual(account.company_name, None)
        self.assertEqual(account.email, "janedoe@gmail.com")
        self.assertEqual(account.account_code, "verena@test.com")
        subscription = account.get_current_subscription()
        self.assertEqual(subscription, None)
        subscription = account.get_subscriptions().latest()
        self.assertEqual(subscription.plan_code, "1dpt")
        self.assertEqual(subscription.plan_version, 2)
        self.assertEqual(subscription.state, "expired")
        self.assertEqual(subscription.quantity, 1)
        self.assertEqual(subscription.total_amount_in_cents, 200)
        self.assertNoSignal("account_opened")
        self.assertSignal("account_closed")

    def test_handle_notification_updating_expired_real(self):
        """An expire push with no prior account creates it already-expired; no signals fire."""
        # Straight in with no prior account
        data = self.parse_xml(self.push_notifications["expired_subscription_notification-real"])
        account, subscription = Account.handle_notification(data)
        # Let's be thorough here
        self.assertEqual(account.user.username, "verena")
        self.assertEqual(account.first_name, "Adam")
        self.assertEqual(account.last_name, "Charnock")
        self.assertEqual(account.company_name, None)
        self.assertEqual(account.email, "adam@continuous.io")
        self.assertEqual(account.account_code, "vKWanguTh5KcZniN0yZeFbjD8xmFfVGT")
        subscription = account.get_current_subscription()
        self.assertEqual(subscription, None)
        subscription = account.get_subscriptions().latest()
        self.assertEqual(subscription.plan_code, "micro")
        self.assertEqual(subscription.plan_version, 1)
        self.assertEqual(subscription.state, "expired")
        self.assertEqual(subscription.quantity, 1)
        self.assertEqual(subscription.total_amount_in_cents, 700)
        self.assertEqual(subscription.activated_at, datetime.datetime(2011, 9, 14, 19, 14, 14))
        # The subscription was created as 'expired' right away, so no signals
        self.assertNoSignal("account_opened")
        self.assertNoSignal("account_closed")

    def test_handle_notification_new_after_expired(self):
        """A new subscription after an expiry keeps the old record and re-opens the account."""
        data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
        Account.handle_notification(data)
        data = self.parse_xml(self.push_notifications["expired_subscription_notification-ok"])
        Account.handle_notification(data)
        self.resetSignals()
        data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
        Account.handle_notification(data)
        self.assertEqual(Account.objects.count(), 1)
        # We should now have the old expired subscription, plus the fresh new one
        self.assertEqual(Subscription.objects.count(), 2)
        self.assertEqual(Subscription.objects.latest().state, "active")
        self.assertSignal("account_opened")
        self.assertNoSignal("account_closed")

    def test_get_current(self):
        """With the Recurly API mocked, handling a push still resolves the right user."""
        from django_recurly.utils import recurly
        recurly.Account.get = Mock(return_value=recurly.Account.from_element(self.resources["account-ok"]))
        recurly.Subscription.get = Mock(return_value=recurly.Subscription.from_element(self.resources["subscription-ok"]))
        data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
        # NOTE(review): data is passed as **kwargs here but positionally in every other test —
        # confirm this is intentional and that handle_notification supports both forms.
        account, subscription = Account.handle_notification(**data)
        # NOTE(review): leftover debug print — consider removing.
        print(account, subscription)
        self.assertEqual(account.user.username, "verena")
class SubscriptionModelTest(BaseTest):
    """Behavioural checks for Subscription.is_current() and Subscription.is_trial()."""

    def test_is_current(self):
        """A freshly activated subscription is current; an expired one is not."""
        payload = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
        Account.handle_notification(payload)
        self.assertTrue(Subscription.objects.latest().is_current())
        payload = self.parse_xml(self.push_notifications["expired_subscription_notification-ok"])
        Account.handle_notification(payload)
        self.assertFalse(Subscription.objects.latest().is_current())

    def test_is_trial(self):
        """A subscription is only a trial while trial_ends_at lies in the future."""
        payload = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
        _, subscription = Account.handle_notification(payload)
        self.assertFalse(subscription.is_trial())
        # Pushing the trial end a day into the future flips the flag.
        subscription.trial_ends_at = datetime.datetime.now() + datetime.timedelta(days=1)
        subscription.save()
        self.assertTrue(subscription.is_trial())
class PaymentModelTest(BaseTest):
def test_handle_payment_successful(self):
data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
account, subscription = Account.handle_notification(data)
data = self.parse_xml(self.push_notifications["successful_payment_notification-ok"])
payment = Payment.handle_notification(data)
self.assertEqual(Payment.objects.count(), 1)
payment = Payment.objects.all().latest()
self.assertEqual(payment.transaction_id, "a5143c1d3a6f4a8287d0e2cc1d4c0427")
self.assertEqual(payment.invoice_id, "ffc64d71d4b5404e93f13aac9c63bxxx")
self.assertEqual(payment.action, "purchase")
self.assertEqual(payment.date, datetime.datetime(2009, 11, 22, 21, 10, 38))
self.assertEqual(payment.amount_in_cents, 1000)
self.assertEqual(payment.status, "success")
self.assertEqual(payment.message, "Bogus Gateway: Forced success")
def test_handle_refund(self):
data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
account, subscription = Account.handle_notification(data)
data = self.parse_xml(self.push_notifications["successful_refund_notification-ok"])
payment = Payment.handle_notification(data)
self.assertEqual(Payment.objects.count(), 1)
payment = Payment.objects.all().latest()
self.assertEqual(payment.transaction_id, "2c7a2e30547e49869efd4e8a44b2be34")
self.assertEqual(payment.invoice_id, "ffc64d71d4b5404e93f13aac9c63b007")
self.assertEqual(payment.action, "credit")
self.assertEqual(payment.amount_in_cents, 235)
self.assertEqual(payment.status, "success")
self.assertEqual(payment.message, "Bogus Gateway: Forced success")
def test_handle_void(self):
data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
account, subscription = Account.handle_notification(data)
data = self.parse_xml(self.push_notifications["void_payment_notification-ok"])
payment = Payment.handle_notification(data)
self.assertEqual(Payment.objects.count(), 1)
payment = Payment.objects.all().latest()
self.assertEqual(payment.transaction_id, "4997ace0f57341adb3e857f9f7d15de8")
self.assertEqual(payment.invoice_id, "ffc64d71d4b5404e93f13aac9c63b007")
self.assertEqual(payment.action, "purchase")
self.assertEqual(payment.amount_in_cents, 235)
self.assertEqual(payment.status, "void")
self.assertEqual(payment.message, "Test Gateway: Successful test transaction")
def test_handle_failed(self):
    """A failed-payment push notification creates one declined Payment without an invoice."""
    # Seed the account/subscription the failed charge belongs to.
    data = self.parse_xml(self.push_notifications["new_subscription_notification-ok"])
    account, subscription = Account.handle_notification(data)
    # Replay the failed-payment notification through the handler under test.
    data = self.parse_xml(self.push_notifications["failed_payment_notification-ok"])
    payment = Payment.handle_notification(data)
    self.assertEqual(Payment.objects.count(), 1)
    payment = Payment.objects.all().latest()
    # Expected field values taken from the fixture payload; a declined charge
    # carries no invoice id.
    expected = {
        "transaction_id": "a5143c1d3a6f4a8287d0e2cc1d4c0427",
        "invoice_id": None,
        "action": "purchase",
        "amount_in_cents": 1000,
        "status": "declined",
        "message": "This transaction has been declined",
    }
    for field, value in expected.items():
        self.assertEqual(getattr(payment, field), value)
| 43.688716
| 123
| 0.723281
| 1,208
| 11,228
| 6.534768
| 0.146523
| 0.155815
| 0.080821
| 0.042564
| 0.813529
| 0.768812
| 0.763745
| 0.74639
| 0.724728
| 0.724728
| 0
| 0.024033
| 0.173584
| 11,228
| 256
| 124
| 43.859375
| 0.826705
| 0.031439
| 0
| 0.61236
| 0
| 0
| 0.141924
| 0.087253
| 0
| 0
| 0
| 0
| 0.539326
| 1
| 0.067416
| false
| 0
| 0.039326
| 0
| 0.123596
| 0.005618
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe1e3536a8d90e753527db601901f762e05f6bfb
| 5,245
|
py
|
Python
|
ksteta3pi/Consideredbkg/MC_12_11134011_MagDown.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
ksteta3pi/Consideredbkg/MC_12_11134011_MagDown.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
ksteta3pi/Consideredbkg/MC_12_11134011_MagDown.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
#-- GAUDI jobOptions generated on Mon Jul 20 10:20:06 2015
#-- Contains event types :
#-- 11134011 - 46 files - 987788 events - 276.67 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-125836
#-- StepId : 125836
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08 - Implicit merging.
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-127969
#-- StepId : 127969
#-- StepName : Reco14c for MC - 2012
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p11
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/DST-multipleTCK-2012.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r218
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper

# All 46 input DSTs belong to production 00046299 and follow one fixed naming
# scheme, differing only in the zero-padded file counter (00000001..00000046).
# Generate the LFN list from that pattern instead of listing each entry.
IOHelper('ROOT').inputFiles(
    ['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046299/0000/'
     '00046299_%08d_2.AllStreams.dst' % n for n in range(1, 47)],
    clear=True,
)
| 63.192771
| 215
| 0.800381
| 738
| 5,245
| 5.563686
| 0.181572
| 0.291281
| 0.100828
| 0.145641
| 0.721383
| 0.721383
| 0.721383
| 0.721383
| 0.721383
| 0.71018
| 0
| 0.322825
| 0.044423
| 5,245
| 82
| 216
| 63.963415
| 0.496409
| 0.222688
| 0
| 0
| 1
| 0.938776
| 0.920533
| 0.919546
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.040816
| 0
| 0.040816
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
a3b6339a5b40f09cc1437ffacc72d616a3e2a23b
| 25,384
|
py
|
Python
|
archive/alon_base/summary_stats_plots.py
|
ngglasgow/biophys_glm_show
|
6554fa395974d95ba78fd5cbe2cf0272e7d69df4
|
[
"MIT"
] | 1
|
2022-03-11T12:47:19.000Z
|
2022-03-11T12:47:19.000Z
|
archive/alon_base/summary_stats_plots.py
|
ngglasgow/biophys_glm_show
|
6554fa395974d95ba78fd5cbe2cf0272e7d69df4
|
[
"MIT"
] | null | null | null |
archive/alon_base/summary_stats_plots.py
|
ngglasgow/biophys_glm_show
|
6554fa395974d95ba78fd5cbe2cf0272e7d69df4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import numpy as np
from neo.core import SpikeTrain, AnalogSignal
import quantities as pq
import pandas as pd
import elephant
import scipy
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import sklearn
sns.set()
# IPython cell magic — this file is executed cell-by-cell in an
# IPython/Hydrogen session, not as a plain Python script.
% matplotlib inline
# set system type as macOS or linux; set project directory; set downstream dirs
# set all these directory variables first
home_dir = os.path.expanduser("~")
# Root of the NEURON simulation project; all inputs/outputs live below it.
project_dir = home_dir + '/ngglasgow@gmail.com/Data_Urban/NEURON/AlmogAndKorngreen2014/par_ModCell_5_thrdsafe/'
data_dir = project_dir + 'scaled_wns_400_8015_output/'
figure_dir = project_dir + 'analysis/figures/'
table_dir = project_dir + 'analysis/tables/'
noise_dir = project_dir + 'scaled_wns_400/'
noise_filename = noise_dir + 'wns_8015_forscaled.txt'
downsample_dir = project_dir + 'alon_scaled_8015_downsampled/'
# Canonical ordering of the ion-channel names used throughout the plots below.
ordered_channels = ['iA', 'iH', 'sk', 'cah', 'car', 'bk', 'kslow', 'na']
'''################ Plot Spiketrain Stats on One plot with #################'''
# Build a 3x2 grid: rows = firing rate / CV ISI / correlation, left column =
# modulatory channels, right column = Na/K-type channels.
# open the spiketrain statistics file if not already open
stats_df = pd.read_csv(table_dir + 'scaled_spiketrain_stats.csv')
sta_features = pd.read_csv(table_dir + 'sta_features_index.csv')
# quick fix for making 0 Hz a nan
list0 = stats_df.index[stats_df['Firing Rate (Hz)'] == 0.0].tolist()
for item in list0:
    # NOTE(review): chained indexing (column view + .iloc assignment) can hit
    # pandas' SettingWithCopyWarning; stats_df.loc[item, 'Firing Rate (Hz)']
    # would be the safer spelling — confirm before relying on the mutation.
    stats_df['Firing Rate (Hz)'].iloc[item] = np.nan
# bare expression: displays the frame when run as a notebook cell
stats_df
# settings for x and y axis minor ticks, the number stands for delta tick
x_minor = MultipleLocator(0.5)
# set the seaborn background style and generate plots
sns.set_style('ticks')
# ColorBrewer "Dark2" palette, one color per channel
dark2 = [u'#1b9e77', u'#d95f02', u'#7570b3', u'#e7298a', u'#66a61e', u'#e6ab02', u'#a6761d', u'#666666']
# make a subplot fig with 3 rows 2 cols 8"x8" with autosizing
stat_list = ['Firing Rate (Hz)', 'CV ISI', 'Correlation']
feature_list = ['Positive Peak (pA)', 'Pos. Peak Time (ms)', 'Negative Peak (pA)', 'Neg. Peak Time (ms)']
important_channels = ['iA', 'iH', 'cah', 'sk']
na_k_channels = ['na', 'kslow', 'bk', 'car']
# create figure for all four stats, two channel types, then sta features
fig, axs = plt.subplots(3, 2, figsize=(5, 5.5), constrained_layout=True)
# add modulatory channel row for spiketrain stats
for row, stat in zip(range(3), stat_list):
    stat_set = stats_df.loc[:, ['Channel', 'Scale', stat]]
    for channel, i in zip(important_channels, range(4)):
        channel_stats = stat_set[stat_set['Channel'] == channel]
        scale = channel_stats['Scale']
        axs[row, 0].plot(scale, channel_stats[stat], label=channel, marker='o', color=dark2[i])
    # per-row axis cosmetics (indentation reconstructed: assumed to run once
    # per row, after the channel loop — TODO confirm)
    axs[row, 0].set_xticks([0, 1, 2])
    axs[row, 0].set_xlim(-0.2, 2.2)
    axs[row, 0].xaxis.set_minor_locator(x_minor)
    axs[row, 0].spines['right'].set_visible(False)
    axs[row, 0].spines['top'].set_visible(False)
    axs[row, 0].set_ylabel(stat)
# add non mod channels
for row, stat in zip(range(3), stat_list):
    stat_set = stats_df.loc[:, ['Channel', 'Scale', stat]]
    for channel, i in zip(na_k_channels, range(4)):
        channel_stats = stat_set[stat_set['Channel'] == channel]
        scale = channel_stats['Scale']
        # colors offset by 3 so the right column continues the palette
        axs[row, 1].plot(scale, channel_stats[stat], label=channel, marker='o', color=dark2[i+3])
    axs[row, 1].set_xticks([0, 1, 2])
    axs[row, 1].set_xlim(-0.2, 2.2)
    axs[row, 1].xaxis.set_minor_locator(x_minor)
    axs[row, 1].spines['right'].set_visible(False)
    axs[row, 1].spines['top'].set_visible(False)
# set x-labels
axs[2, 1].set_xlabel('Scaling Factor')
axs[2, 0].set_xlabel('Scaling Factor')
fig
# set yscales and minor ticks for each row and col
fr_yscale = (-5, 100)
cv_yscale = (-0.05, 1.4)
corr_yscale = (0.08, 0.4)
y_scale_list = [fr_yscale, cv_yscale, corr_yscale]
fr_yminor = MultipleLocator(25)
# no argument: MultipleLocator defaults to a base of 1.0
cv_yminor = MultipleLocator()
corr_yminor = MultipleLocator(0.05)
yminor_list = [fr_yminor, cv_yminor, corr_yminor]
for row in range(3):
    for col in range(2):
        axs[row, col].set_ylim(y_scale_list[row])
        axs[row, col].yaxis.set_minor_locator(yminor_list[row])
fig
# save fig to file
fig.savefig(figure_dir + 'stats_summary_vertical55.png', dpi=300, format='png')
'''################ Plot Spiketrain Stats on One plot horizontally #################'''
# Same data as the vertical layout above, but as a 1x6 strip: even columns get
# the modulatory channels, odd columns the Na/K-type channels.
# open the spiketrain statistics file if not already open
stats_df = pd.read_csv(table_dir + 'scaled_spiketrain_stats.csv')
sta_features = pd.read_csv(table_dir + 'sta_features_index.csv')
# quick fix for making 0 Hz a nan
list0 = stats_df.index[stats_df['Firing Rate (Hz)'] == 0.0].tolist()
for item in list0:
    stats_df['Firing Rate (Hz)'].iloc[item] = np.nan
stats_df
# settings for x and y axis minor ticks, the number stands for delta tick
x_minor = MultipleLocator(0.5)
# set the seaborn background style and generate plots
sns.set_style('ticks')
dark2 = [u'#1b9e77', u'#d95f02', u'#7570b3', u'#e7298a', u'#66a61e', u'#e6ab02', u'#a6761d', u'#666666']
# make a subplot fig with 3 rows 2 cols 8"x8" with autosizing
stat_list = ['Firing Rate (Hz)', 'CV ISI', 'Correlation']
feature_list = ['Positive Peak (pA)', 'Pos. Peak Time (ms)', 'Negative Peak (pA)', 'Neg. Peak Time (ms)']
important_channels = ['iA', 'iH', 'cah', 'sk']
na_k_channels = ['na', 'kslow', 'bk', 'car']
# create figure for all four stats, two channel types, then sta features
fig, axs = plt.subplots(1, 6, figsize=(16.5, 1.75), constrained_layout=True)
# add modulatory channel row for spiketrain stats
for col, stat in zip(np.arange(0, 6, 2), stat_list):
    stat_set = stats_df.loc[:, ['Channel', 'Scale', stat]]
    # NOTE(review): range(3) truncates the zip, so only the first three of the
    # four important_channels are drawn here — confirm this is intentional.
    for channel, i in zip(important_channels, range(3)):
        channel_stats = stat_set[stat_set['Channel'] == channel]
        scale = channel_stats['Scale']
        axs[col].plot(scale, channel_stats[stat], label=channel, marker='o', color=dark2[i])
    axs[col].set_xticks([0, 1, 2, 3])
    axs[col].set_xlim(-0.2, 3.2)
    axs[col].xaxis.set_minor_locator(x_minor)
    axs[col].spines['right'].set_visible(False)
    axs[col].spines['top'].set_visible(False)
    axs[col].set_xlabel('Scaling Factor')
    axs[col].set_ylabel(stat)
# add non mod channels
for col, stat in zip(np.arange(1, 6, 2), stat_list):
    stat_set = stats_df.loc[:, ['Channel', 'Scale', stat]]
    # NOTE(review): same range(3) truncation as above — 'car' is never drawn.
    for channel, i in zip(na_k_channels, range(3)):
        channel_stats = stat_set[stat_set['Channel'] == channel]
        scale = channel_stats['Scale']
        axs[col].plot(scale, channel_stats[stat], label=channel, marker='o', color=dark2[i+3])
    axs[col].set_xticks([0, 1, 2, 3])
    axs[col].set_xlim(-0.2, 3.2)
    axs[col].xaxis.set_minor_locator(x_minor)
    axs[col].spines['right'].set_visible(False)
    axs[col].spines['top'].set_visible(False)
    axs[col].set_xlabel('Scaling Factor')
    # axs[col].set_ylabel(stat)
fig
# set yscales and minor ticks for each row and col
fr_yscale = (20, 70)
cv_yscale = (0.10, 0.4)
corr_yscale = (0.08, 0.26)
y_scale_list = [fr_yscale, cv_yscale, corr_yscale]
fr_yminor = MultipleLocator(10)
cv_yminor = MultipleLocator(0.1)
corr_yminor = MultipleLocator(0.05)
yminor_list = [fr_yminor, cv_yminor, corr_yminor]
# column pairs [mod, non-mod] that share a statistic and therefore a y-scale
col_list = [[0, 1], [2, 3], [4, 5]]
for cols, i in zip(col_list, range(3)):
    for col in cols:
        axs[col].set_ylim(y_scale_list[i])
        axs[col].yaxis.set_minor_locator(yminor_list[i])
fig
# save fig to file
fig.savefig(figure_dir + 'stats_summary_labels_horiz.png', dpi=300, format='png')
'''######################## STA correlations ###############################'''
# Pairwise correlation between spike-triggered-average traces, as a heatmap.
sta_mean = pd.read_csv(table_dir + 'sta_mean.csv').T
sta_mean.T
# Drop the first 3 rows and the last 50 samples of each trace, then scale by
# 1000 (presumably a unit conversion, e.g. nA -> pA — TODO confirm).
stas = sta_mean.iloc[3:-50] * 1000
stas = stas.astype(float)
sta_mean
sta_corr = stas.corr()
sns_plot = sns.heatmap(sta_corr, vmin=0.5, cmap='inferno', xticklabels=10, yticklabels=10, square=True, robust=True, linecolor='w')
fig = sns_plot.get_figure()
fig.savefig(figure_dir + 'sta_correlations.png', dpi=300, format='png')
'''############### STA Features vs. Channel Conductance ####################'''
# 3x2 figure: top two rows plot each STA feature against the conductance
# scaling factor; bottom row plots feature-vs-feature trajectories.
sta_features = pd.read_csv(table_dir + 'sta_features_index.csv')
dark2 = [u'#1b9e77', u'#d95f02', u'#7570b3', u'#e7298a', u'#66a61e', u'#e6ab02', u'#a6761d', u'#666666']
sns.set_style('ticks')
feature_list = ['Positive Peak (pA)', 'Negative Peak (pA)', 'Max Slope (pA/ms)', 'Integration Time (ms)']
important_channels = ['iA', 'iH']
na_k_channels = ['na', 'kslow', 'bk', 'car']
# put into a single figure
x_minor = MultipleLocator(0.5)
y_minor = MultipleLocator(10)
# bottom row gets 1.5x the height of the feature rows
gs_kw = dict(height_ratios=[1, 1, 1.5])
fig, axs = plt.subplots(3, 2, figsize=(6, 5.5), constrained_layout=True, gridspec_kw=gs_kw)
for ax, feature in zip(axs.flat, feature_list):
    feature_set = sta_features.loc[:, ['Channel', 'Scale', feature]]
    for channel, i in zip(important_channels, range(4)):
        channel_feature = feature_set[feature_set['Channel'] == channel]
        scale = channel_feature['Scale']
        ax.plot(scale, channel_feature[feature], label=channel, marker='o', color=dark2[i])
    ax.set_xlim(-0.2, 2.2)
    ax.xaxis.set_minor_locator(x_minor)
    # ax.set_ylabel(feature)
    # ax.set_ylim(-25, 55)
    # ax.yaxis.set_minor_locator(y_minor)
    ax.spines["right"].set_visible(False) # if I want cleaner look w/out
    ax.spines["top"].set_visible(False) # top and right axes lines
# set bottom x labels
axs[1, 0].set_xlabel('Scaling Factor')
axs[1, 1].set_xlabel('Scaling Factor')
axs[0, 0].set_ylabel('Pos. Peak (pA)')
axs[0, 1].set_ylabel('Neg. Peak (pA)')
axs[1, 0].set_ylabel('Slope (pA/ms)')
axs[1, 1].set_ylabel('Int. Time (ms)')
axs[1, 1].set_ylim(0, 15)
# fig
# set yscales and minor ticks for each row and col
# axs[0, 0].set_ylim(15, 55)
# axs[0, 0].yaxis.set_minor_locator(MultipleLocator(10))
# axs[0, 1].set_ylim(-25, -7.5)
# axs[0, 1].yaxis.set_minor_locator(MultipleLocator(5))
#axs[1, 0].set_ylim(2, 10)
# axs[1, 0].yaxis.set_minor_locator(MultipleLocator(10))
#axs[1, 1].set_ylim(0, 15)
# axs[1, 1].yaxis.set_minor_locator(MultipleLocator(10))
#fig
# Mask implausible integration times (> 20 ms) as NaN before plotting.
list0 = sta_features.index[sta_features['Integration Time (ms)'] > 20.0].tolist()
for item in list0:
    sta_features['Integration Time (ms)'].iloc[item] = np.nan
# add the bottom two plots
for channel, i in zip(important_channels, range(4)):
    # NOTE(review): the boolean mask uses `feature_set` (leftover from the
    # loop above) rather than `sta_features`; this only works because their
    # row indices align — confirm and consider using sta_features directly.
    channel_set = sta_features[feature_set['Channel'] == channel]
    scale01 = channel_set['Scale'][channel_set['Scale'] == 0.50].index
    scale1 = channel_set['Scale'][channel_set['Scale'] == 1.00].index
    scale3 = channel_set['Scale'][channel_set['Scale'] == 1.50].index
    pos_peak = channel_set['Positive Peak (pA)']
    neg_peak = channel_set['Negative Peak (pA)']
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    axs[2, 0].plot(pos_peak, slope, color=dark2[i], marker='.')
    axs[2, 1].plot(slope, int_time, color=dark2[i], marker='.')
    # black circles mark the unscaled (1.00) condition
    axs[2, 0].plot(pos_peak.loc[scale1], slope.loc[scale1], linestyle='', marker='o', markersize=8, color='k')
    axs[2, 1].plot(slope.loc[scale1], int_time.loc[scale1], linestyle='', marker='o', markersize=8, color='k')
# second pass: triangle markers (11 = down, 10 = up caret) for 0.50 / 1.50
for channel, i in zip(important_channels, range(4)):
    channel_set = sta_features[feature_set['Channel'] == channel]
    scale01 = channel_set['Scale'][channel_set['Scale'] == 0.50].index
    scale1 = channel_set['Scale'][channel_set['Scale'] == 1.00].index
    scale3 = channel_set['Scale'][channel_set['Scale'] == 1.50].index
    pos_peak = channel_set['Positive Peak (pA)']
    neg_peak = channel_set['Negative Peak (pA)']
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    axs[2, 0].plot(pos_peak.loc[scale01], slope.loc[scale01], linestyle='', marker=11, markersize=10, color=dark2[i], markeredgecolor='k')
    axs[2, 0].plot(pos_peak.loc[scale3], slope.loc[scale3], linestyle='', marker=10, markersize=10, color=dark2[i], markeredgecolor='k')
    axs[2, 1].plot(slope.loc[scale01], int_time.loc[scale01], linestyle='', marker=11, markersize=10, color=dark2[i], markeredgecolor='k')
    axs[2, 1].plot(slope.loc[scale3], int_time.loc[scale3], linestyle='', marker=10, markersize=10, color=dark2[i], markeredgecolor='k')
axs[2, 0].set_xlabel('Pos. Peak (pA)')
axs[2, 0].set_ylabel('Slope (pA/ms)')
axs[2, 0].spines["right"].set_visible(False) # if I want cleaner look w/out
axs[2, 0].spines["top"].set_visible(False) # top and right axes lines
axs[2, 1].set_ylim(0, 15)
axs[2, 1].set_xlabel('Slope (pA/ms)')
axs[2, 1].set_ylabel('Int. Time (ms)')
axs[2, 1].spines["right"].set_visible(False) # if I want cleaner look w/out
axs[2, 1].spines["top"].set_visible(False) # top and right axes lines
fig
# save to file
fig.savefig(figure_dir + 'sta_feature_summary_vertical55_2.png', dpi=300, format='png')
'''############### STA Features vs. Scales Horizontal ####################'''
# 1x6 strip: columns 0-3 plot each STA feature vs. the scaling factor for the
# Bhalla-model channel names; columns 4-5 plot feature-vs-feature trajectories.
% matplotlib inline
sta_features = pd.read_csv(table_dir + 'sta_features_index.csv')
feature_list = ['Positive Peak (pA)', 'Negative Peak (pA)', 'Max Slope (pA/ms)', 'Integration Time (ms)']
important_channels = ['kA', 'kca3', 'lcafixed']
na_k_channels = ['nafast', 'kslowtab', 'kfasttab']
# put into a single figure
x_minor = MultipleLocator(0.5)
fig, axs = plt.subplots(1, 6, figsize=(16.5, 2.25), constrained_layout=True)
for ax, feature in zip(axs.flat, feature_list):
    feature_set = sta_features.loc[:, ['Channel', 'Scale', feature]]
    for channel, i in zip(important_channels, range(4)):
        channel_feature = feature_set[feature_set['Channel'] == channel]
        scale = channel_feature['Scale']
        ax.plot(scale, channel_feature[feature], label=channel, marker='o', color=dark2[i])
    ax.set_xlim(-0.2, 3.2)
    ax.xaxis.set_minor_locator(x_minor)
    ax.set_ylabel(feature)
    # ax.set_ylim(-25, 55)
    # ax.yaxis.set_minor_locator(y_minor)
    ax.spines["right"].set_visible(False) # if I want cleaner look w/out
    ax.spines["top"].set_visible(False) # top and right axes lines
    ax.set_xlabel('Scaling Factor')
# set y limits
axs[0].set_ylim(15, 55)
axs[1].set_ylim(-25, -7.5)
axs[2].set_ylim(2, 10)
axs[3].set_ylim(7, 15)
# add sta_feature vs. feature plots
for channel, i in zip(important_channels, range(4)):
    # NOTE(review): mask uses `feature_set` (leftover loop variable) instead of
    # `sta_features`; works only because row indices align — confirm.
    channel_set = sta_features[feature_set['Channel'] == channel]
    scale01 = channel_set['Scale'][channel_set['Scale'] == 0.01].index
    scale1 = channel_set['Scale'][channel_set['Scale'] == 1.00].index
    scale3 = channel_set['Scale'][channel_set['Scale'] == 3.00].index
    pos_peak = channel_set['Positive Peak (pA)']
    neg_peak = channel_set['Negative Peak (pA)']
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    axs[4].plot(pos_peak, slope, color=dark2[i], marker='.')
    axs[5].plot(slope, int_time, color=dark2[i], marker='.')
    # black circles mark the unscaled (1.00) condition
    axs[4].plot(pos_peak.loc[scale1], slope.loc[scale1], linestyle='', marker='o', markersize=8, color='k')
    axs[5].plot(slope.loc[scale1], int_time.loc[scale1], linestyle='', marker='o', markersize=8, color='k')
# second pass: caret markers (11 = down, 10 = up) for the 0.01 / 3.00 extremes
for channel, i in zip(important_channels, range(4)):
    channel_set = sta_features[feature_set['Channel'] == channel]
    scale01 = channel_set['Scale'][channel_set['Scale'] == 0.01].index
    scale1 = channel_set['Scale'][channel_set['Scale'] == 1.00].index
    scale3 = channel_set['Scale'][channel_set['Scale'] == 3.00].index
    pos_peak = channel_set['Positive Peak (pA)']
    neg_peak = channel_set['Negative Peak (pA)']
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    axs[4].plot(pos_peak.loc[scale01], slope.loc[scale01], linestyle='', marker=11, markersize=10, color=dark2[i], markeredgecolor='k')
    axs[4].plot(pos_peak.loc[scale3], slope.loc[scale3], linestyle='', marker=10, markersize=10, color=dark2[i], markeredgecolor='k')
    axs[5].plot(slope.loc[scale01], int_time.loc[scale01], linestyle='', marker=11, markersize=10, color=dark2[i], markeredgecolor='k')
    axs[5].plot(slope.loc[scale3], int_time.loc[scale3], linestyle='', marker=10, markersize=10, color=dark2[i], markeredgecolor='k')
# axis labels reuse the Series names from the last loop iteration
axs[4].set_xlabel(pos_peak.name)
axs[4].set_ylabel(slope.name)
axs[4].spines["right"].set_visible(False) # if I want cleaner look w/out
axs[4].spines["top"].set_visible(False) # top and right axes lines
axs[5].set_xlabel(slope.name)
axs[5].set_ylabel(int_time.name)
axs[5].spines["right"].set_visible(False) # if I want cleaner look w/out
axs[5].spines["top"].set_visible(False) # top and right axes lines
fig
# save to file
fig.savefig(figure_dir + 'sta_feature_summary_horizontal2.png', dpi=300, format='png')
'''################ make channel legends for bhalla model ##################'''
# Throwaway figure whose only purpose is a standalone color legend mapping
# each channel to its Dark2 palette color.
fig, ax = plt.subplots(1, 2, constrained_layout=True)
for channel, i in zip(ordered_channels, range(8)):
    channel_set = sta_features['Scale'][sta_features['Channel'] == channel]
    ax[0].plot(channel_set, marker='o', color=dark2[i], label=channel)
# place the legend outside the axes, to the right
ax[0].legend(loc='center left', bbox_to_anchor=(1.1, 0.5))
fig
fig.savefig(figure_dir + 'channel_legend.png', dpi=300, format='png')
'''#################### STA Features vs. STA Features ######################'''
# Two stacked panels: pos-peak vs neg-peak, and max-slope vs integration time,
# with '^'/'o'/'v' markers at scales 0.01 / 1.00 / 3.00 respectively.
% matplotlib
sta_features = pd.read_csv(table_dir + 'sta_features_index.csv')
important_channels = ['kA', 'kca3', 'lcafixed']
fig, axs = plt.subplots(2, 1, figsize=(3, 5.25), constrained_layout=True)
for channel, i in zip(important_channels, range(3)):
    # NOTE(review): mask uses `feature_set` from an earlier section rather
    # than `sta_features` — depends on cell execution order; confirm.
    channel_set = sta_features[feature_set['Channel'] == channel]
    scale01 = channel_set['Scale'][channel_set['Scale'] == 0.01].index
    scale1 = channel_set['Scale'][channel_set['Scale'] == 1.00].index
    scale3 = channel_set['Scale'][channel_set['Scale'] == 3.00].index
    pos_peak = channel_set['Positive Peak (pA)']
    neg_peak = channel_set['Negative Peak (pA)']
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    axs[0].plot(pos_peak, neg_peak, color=dark2[i])
    axs[1].plot(slope, int_time, color=dark2[i])
# second pass overlays the scale-extreme markers
for channel, i in zip(important_channels, range(3)):
    channel_set = sta_features[feature_set['Channel'] == channel]
    scale01 = channel_set['Scale'][channel_set['Scale'] == 0.01].index
    scale1 = channel_set['Scale'][channel_set['Scale'] == 1.00].index
    scale3 = channel_set['Scale'][channel_set['Scale'] == 3.00].index
    pos_peak = channel_set['Positive Peak (pA)']
    neg_peak = channel_set['Negative Peak (pA)']
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    axs[0].plot(pos_peak.loc[scale01], neg_peak.loc[scale01], linestyle='', marker='^', markersize=10, color=dark2[i])
    axs[0].plot(pos_peak.loc[scale1], neg_peak.loc[scale1], linestyle='', marker='o', markersize=10, color='k')
    axs[0].plot(pos_peak.loc[scale3], neg_peak.loc[scale3], linestyle='', marker='v', markersize=10, color=dark2[i])
    axs[1].plot(slope.loc[scale01], int_time.loc[scale01], linestyle='', marker='^', markersize=10, color=dark2[i])
    axs[1].plot(slope.loc[scale1], int_time.loc[scale1], linestyle='', marker='o', markersize=10, color='k')
    axs[1].plot(slope.loc[scale3], int_time.loc[scale3], linestyle='', marker='v', markersize=10, color=dark2[i])
axs[0].set_xlabel('Pos. Peak (pA)')
axs[0].set_ylabel('Neg. Peak (pA)')
axs[1].set_xlabel('Max Slope (pA/ms)')
axs[1].set_ylabel('Integration Time (ms)')
fig
# save fig
fig.savefig(figure_dir + 'posvneg_slopevint.png', dpi=300, format='png')
'''#################### STA Features vs. Spike Stats ######################'''
# NOTE(review): this section is a byte-for-byte copy of the "STA Features vs.
# STA Features" section above (same plots, same output filename) despite the
# header mentioning spike stats — likely a stale duplicate; confirm intent.
% matplotlib
sta_features = pd.read_csv(table_dir + 'sta_features_index.csv')
important_channels = ['kA', 'kca3', 'lcafixed']
fig, axs = plt.subplots(2, 1, figsize=(3, 5.25), constrained_layout=True)
for channel, i in zip(important_channels, range(3)):
    channel_set = sta_features[feature_set['Channel'] == channel]
    scale01 = channel_set['Scale'][channel_set['Scale'] == 0.01].index
    scale1 = channel_set['Scale'][channel_set['Scale'] == 1.00].index
    scale3 = channel_set['Scale'][channel_set['Scale'] == 3.00].index
    pos_peak = channel_set['Positive Peak (pA)']
    neg_peak = channel_set['Negative Peak (pA)']
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    axs[0].plot(pos_peak, neg_peak, color=dark2[i])
    axs[1].plot(slope, int_time, color=dark2[i])
for channel, i in zip(important_channels, range(3)):
    channel_set = sta_features[feature_set['Channel'] == channel]
    scale01 = channel_set['Scale'][channel_set['Scale'] == 0.01].index
    scale1 = channel_set['Scale'][channel_set['Scale'] == 1.00].index
    scale3 = channel_set['Scale'][channel_set['Scale'] == 3.00].index
    pos_peak = channel_set['Positive Peak (pA)']
    neg_peak = channel_set['Negative Peak (pA)']
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    axs[0].plot(pos_peak.loc[scale01], neg_peak.loc[scale01], linestyle='', marker='^', markersize=10, color=dark2[i])
    axs[0].plot(pos_peak.loc[scale1], neg_peak.loc[scale1], linestyle='', marker='o', markersize=10, color='k')
    axs[0].plot(pos_peak.loc[scale3], neg_peak.loc[scale3], linestyle='', marker='v', markersize=10, color=dark2[i])
    axs[1].plot(slope.loc[scale01], int_time.loc[scale01], linestyle='', marker='^', markersize=10, color=dark2[i])
    axs[1].plot(slope.loc[scale1], int_time.loc[scale1], linestyle='', marker='o', markersize=10, color='k')
    axs[1].plot(slope.loc[scale3], int_time.loc[scale3], linestyle='', marker='v', markersize=10, color=dark2[i])
axs[0].set_xlabel('Pos. Peak (pA)')
axs[0].set_ylabel('Neg. Peak (pA)')
axs[1].set_xlabel('Max Slope (pA/ms)')
axs[1].set_ylabel('Integration Time (ms)')
fig
# save fig
fig.savefig(figure_dir + 'posvneg_slopevint.png', dpi=300, format='png')
'''################ STA Features and Spike Statistics Plots ################'''
# Exploratory cells: merge STA features with spike-train stats and try 2-D
# annotated plots and 3-D plots. Written for Python 2 (print statements).
# open data from files
stats_df = pd.read_csv(table_dir + 'scaled_spiketrain_stats.csv')
sta_features = pd.read_csv(table_dir + 'sta_features_index.csv')
# merge into a single df to allow easy calling later on
stats_features = pd.merge(stats_df, sta_features, on=['Channel', 'Scale'])
stats_features.columns
# set up lists of subsets of channels for testing
important_channels = ['kA', 'kca3', 'lcafixed']
na_k_channels = ['nafast', 'kslowtab', 'kfasttab']
test = ['kA']
''' plot 2d images with labels at 3 an 0.01'''
plt.figure()
for channel in important_channels:
    channel_set = stats_features[stats_features['Channel'] == channel]
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    scale = channel_set['Scale']
    plt.plot(slope, int_time, label=channel, marker='o')
    # annotate only the two extreme scale conditions
    for label, x, y in zip(scale, slope, int_time):
        if label == 3.0:
            plt.annotate('(%s)' % label, xy=(x, y), textcoords='data')
        elif label == 0.01:
            plt.annotate('(%s)' % label, xy=(x, y), textcoords='data')
plt.legend()
scale
# debug dump of the annotated points (Python 2 print statements)
for label, x, y in zip (scale, slope, int_time):
    if label == 3.0:
        print label, x, y
    elif label == 0.01:
        print label, x, y
zip(scale, slope, int_time)
''' plot 2d images with different markers at 3, 1, 0.01'''
'''
USe a ax.get_line[-1].get_markerfacecolor() to set the color of all markers for
a given line. NOt fully tested yet but should work.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
for channel in important_channels:
    channel_set = stats_features[stats_features['Channel'] == channel]
    slope = channel_set['Max Slope (pA/ms)']
    int_time = channel_set['Integration Time (ms)']
    scale = channel_set['Scale']
    ax.plot(slope, int_time, label=channel)
    # for label, x, y in zip(scale, slope, int_time):
    # if label == 3.0:
    # ax.scatter(x, y, marker='^')
    # elif label == 1:
    # ax.scatter(x, y, marker='o')
    # elif label == 0.01:
    # ax.scatter(x, y, marker='v')
plt.legend()
ax.get_facecolor()
ax.get_lines()[2].get_markerfacecolor()
# preview a 3-color palette
sns.palplot([(0.2980392156862745, 0.4470588235294118, 0.6901960784313725), (0.3333333333333333, 0.6588235294117647, 0.40784313725490196), (0.7686274509803922, 0.3058823529411765, 0.3215686274509804)])
# NOTE(review): this loop references `stats_features_list` before it is
# defined (see below), plots `feat` which is never assigned in this loop, and
# calls set_zlabel on a 2-D axes — it raises if the cells are run top-down.
# Looks like a stale draft of the 3-D loop that follows; confirm and remove.
for feature in stats_features_list:
    plt.figure()
    for channel in important_channels:
        channel_set = stats_features[stats_features['Channel'] == channel]
        slope = channel_set['Max Slope (pA/ms)']
        int_time = channel_set['Integration Time (ms)']
        scale = channel_set[feature]
        ax.plot(slope, int_time, feat, label=channel, marker='o')
    ax.legend()
    ax.set_title(feature)
    ax.set_xlabel('max slope (pA/ms)')
    ax.set_ylabel('integration time (ms)')
    ax.set_zlabel(feature)
''' plot 3d images '''
from mpl_toolkits.mplot3d import Axes3D
stats_features_list = ['Correlation', 'Firing Rate (Hz)', 'Fano Factor', 'CV ISI']
# one 3-D figure per statistic: slope x integration-time x statistic
for feature in stats_features_list:
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for channel in important_channels:
        channel_set = stats_features[stats_features['Channel'] == channel]
        slope = channel_set['Max Slope (pA/ms)']
        int_time = channel_set['Integration Time (ms)']
        feat = channel_set[feature]
        ax.plot(slope, int_time, feat, label=channel, marker='o')
    ax.legend()
    ax.set_title(feature)
    ax.set_xlabel('max slope (pA/ms)')
    ax.set_ylabel('integration time (ms)')
    ax.set_zlabel(feature)
| 43.61512
| 200
| 0.675189
| 3,937
| 25,384
| 4.193802
| 0.095758
| 0.0642
| 0.045424
| 0.031494
| 0.844225
| 0.818606
| 0.782448
| 0.755072
| 0.729453
| 0.71522
| 0
| 0.045877
| 0.143003
| 25,384
| 581
| 201
| 43.690189
| 0.71311
| 0.09896
| 0
| 0.627119
| 0
| 0
| 0.155458
| 0.02604
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.084746
| null | null | 0.004843
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43078476be0db6ceb750ec8efbb654cd9864421b
| 26,070
|
py
|
Python
|
src/pythae/models/nn/benchmarks/mnist.py
|
clementchadebec/benchmark_VAE
|
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
|
[
"Apache-2.0"
] | 143
|
2021-10-17T08:43:33.000Z
|
2022-03-31T11:10:53.000Z
|
src/pythae/models/nn/benchmarks/mnist.py
|
louis-j-vincent/benchmark_VAE
|
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
|
[
"Apache-2.0"
] | 6
|
2022-01-21T17:40:09.000Z
|
2022-03-16T13:09:22.000Z
|
src/pythae/models/nn/benchmarks/mnist.py
|
louis-j-vincent/benchmark_VAE
|
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
|
[
"Apache-2.0"
] | 18
|
2021-12-16T15:17:08.000Z
|
2022-03-15T01:30:13.000Z
|
"""Proposed neural nets architectures suited for MNIST"""
from typing import List
import numpy as np
import torch
import torch.nn as nn
from pythae.models.nn import BaseDecoder, BaseDiscriminator, BaseEncoder
from ....models import BaseAEConfig
from ....models.base.base_utils import ModelOutput
from ..base_architectures import BaseDecoder, BaseEncoder
class Encoder_AE_MNIST(BaseEncoder):
    """A convolutional encoder suited for MNIST and Autoencoder-based models.

    Four Conv2d(kernel=4, stride=2, padding=1) + BatchNorm2d + ReLU stages
    (1 -> 128 -> 256 -> 512 -> 1024 channels) reduce the 28x28 input to a
    1x1 feature map; a final linear layer maps the flattened 1024 features
    to ``latent_dim``.

    .. code-block::

        >>> from pythae.models.nn.benchmarks.mnist import Encoder_AE_MNIST
        >>> from pythae.models import AEConfig
        >>> model_config = AEConfig(input_dim=(1, 28, 28), latent_dim=16)
        >>> encoder = Encoder_AE_MNIST(model_config)

    .. note::
        Only suitable for Autoencoder-based models: it outputs the
        embeddings of the input data under the key ``embedding`` only
        (no covariance term).
    """

    def __init__(self, args: BaseAEConfig):
        """Build the convolutional stack.

        Args:
            args: model configuration; only ``args.latent_dim`` is read.
        """
        BaseEncoder.__init__(self)
        self.input_dim = (1, 28, 28)  # fixed: this architecture is MNIST-specific
        self.latent_dim = args.latent_dim
        self.n_channels = 1

        layers = nn.ModuleList()
        # Four identical conv stages, built in a loop rather than four
        # copy-pasted nn.Sequential literals. Module indices (and thus
        # state_dict keys) match the unrolled original exactly.
        channels = (self.n_channels, 128, 256, 512, 1024)
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            layers.append(
                nn.Sequential(
                    nn.Conv2d(c_in, c_out, 4, 2, padding=1),
                    nn.BatchNorm2d(c_out),
                    nn.ReLU(),
                )
            )

        self.layers = layers
        self.depth = len(layers)
        self.embedding = nn.Linear(1024, args.latent_dim)

    def forward(self, x: torch.Tensor, output_layer_levels: List[int] = None):
        """Encode a batch of images.

        Args:
            x: input batch of shape ``(B, 1, 28, 28)``.
            output_layer_levels: 1-based levels of intermediate layers whose
                outputs should also be returned; ``-1`` denotes the last
                layer. If ``None``, only the final embedding is returned.

        Returns:
            ModelOutput: embeddings under the key ``embedding``; requested
            intermediate outputs under ``embedding_layer_i`` (i = level).
        """
        output = ModelOutput()

        max_depth = self.depth
        if output_layer_levels is not None:
            assert all(
                self.depth >= level > 0 or level == -1
                for level in output_layer_levels
            ), (
                # Fixed: the two message fragments were previously
                # concatenated without a separating space.
                f"Cannot output layer deeper than depth ({self.depth}). "
                f"Got ({output_layer_levels})."
            )
            if -1 in output_layer_levels:
                max_depth = self.depth
            else:
                # Only run as deep as the deepest requested layer.
                max_depth = max(output_layer_levels)

        out = x
        for i in range(max_depth):
            out = self.layers[i](out)
            if output_layer_levels is not None and i + 1 in output_layer_levels:
                output[f"embedding_layer_{i+1}"] = out
            if i + 1 == self.depth:
                # Flatten the (B, 1024, 1, 1) map before the projection.
                output["embedding"] = self.embedding(out.reshape(x.shape[0], -1))
        return output
class Encoder_VAE_MNIST(BaseEncoder):
    """A convolutional encoder suited for MNIST and Variational
    Autoencoder-based models.

    Same conv stack as :class:`Encoder_AE_MNIST` (four Conv2d(4, 2, 1) +
    BatchNorm2d + ReLU stages, 1 -> 128 -> 256 -> 512 -> 1024 channels),
    followed by two linear heads mapping the flattened 1024 features to the
    posterior mean (``embedding``) and log-variance (``log_covariance``).

    .. code-block::

        >>> from pythae.models.nn.benchmarks.mnist import Encoder_VAE_MNIST
        >>> from pythae.models import VAEConfig
        >>> model_config = VAEConfig(input_dim=(1, 28, 28), latent_dim=16)
        >>> encoder = Encoder_VAE_MNIST(model_config)

    .. note::
        Only suitable for Variational Autoencoder-based models: it outputs
        the embeddings and the **log** of the covariance diagonal
        coefficients under the keys ``embedding`` and ``log_covariance``.
    """

    def __init__(self, args: BaseAEConfig):
        """Build the convolutional stack and the two linear heads.

        Args:
            args: model configuration; only ``args.latent_dim`` is read.
        """
        BaseEncoder.__init__(self)
        self.input_dim = (1, 28, 28)  # fixed: this architecture is MNIST-specific
        self.latent_dim = args.latent_dim
        self.n_channels = 1

        layers = nn.ModuleList()
        # Conv stages built in a loop; state_dict keys are identical to the
        # unrolled original.
        channels = (self.n_channels, 128, 256, 512, 1024)
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            layers.append(
                nn.Sequential(
                    nn.Conv2d(c_in, c_out, 4, 2, padding=1),
                    nn.BatchNorm2d(c_out),
                    nn.ReLU(),
                )
            )

        self.layers = layers
        self.depth = len(layers)
        self.embedding = nn.Linear(1024, args.latent_dim)
        self.log_var = nn.Linear(1024, args.latent_dim)

    def forward(self, x: torch.Tensor, output_layer_levels: List[int] = None):
        """Encode a batch of images.

        Args:
            x: input batch of shape ``(B, 1, 28, 28)``.
            output_layer_levels: 1-based levels of intermediate layers whose
                outputs should also be returned; ``-1`` denotes the last
                layer. If ``None``, only the final heads are returned.

        Returns:
            ModelOutput: embeddings under ``embedding`` and the **log** of
            the covariance diagonal under ``log_covariance``; requested
            intermediate outputs under ``embedding_layer_i`` (i = level).
        """
        output = ModelOutput()

        max_depth = self.depth
        if output_layer_levels is not None:
            assert all(
                self.depth >= level > 0 or level == -1
                for level in output_layer_levels
            ), (
                # Fixed: missing separating space and trailing period in
                # the original message.
                f"Cannot output layer deeper than depth ({self.depth}). "
                f"Got ({output_layer_levels})."
            )
            if -1 in output_layer_levels:
                max_depth = self.depth
            else:
                max_depth = max(output_layer_levels)

        out = x
        for i in range(max_depth):
            out = self.layers[i](out)
            if output_layer_levels is not None and i + 1 in output_layer_levels:
                output[f"embedding_layer_{i+1}"] = out
            if i + 1 == self.depth:
                # Both heads read the same flattened (B, 1024) features.
                flat = out.reshape(x.shape[0], -1)
                output["embedding"] = self.embedding(flat)
                output["log_covariance"] = self.log_var(flat)
        return output
class Encoder_SVAE_MNIST(BaseEncoder):
    """A convolutional encoder suited for MNIST and the Hyperspherical
    Variational Autoencoder (SVAE).

    Same conv stack as :class:`Encoder_AE_MNIST`, followed by two linear
    heads: one mapping the flattened 1024 features to ``latent_dim``
    (``embedding``) and one producing a single scalar per sample, the **log**
    of the Von Mises-Fisher concentration (``log_concentration``).

    .. code-block::

        >>> from pythae.models.nn.benchmarks.mnist import Encoder_SVAE_MNIST
        >>> from pythae.models import SVAEConfig
        >>> model_config = SVAEConfig(input_dim=(1, 28, 28), latent_dim=16)
        >>> encoder = Encoder_SVAE_MNIST(model_config)

    .. note::
        Only suitable for Hyperspherical VAE models: it outputs the
        embeddings and the **log** concentration of the Von Mises-Fisher
        distribution under the keys ``embedding`` and ``log_concentration``.
    """

    def __init__(self, args: BaseAEConfig):
        """Build the convolutional stack and the two heads.

        Args:
            args: model configuration; only ``args.latent_dim`` is read.
        """
        BaseEncoder.__init__(self)
        self.input_dim = (1, 28, 28)  # fixed: this architecture is MNIST-specific
        self.latent_dim = args.latent_dim
        self.n_channels = 1

        layers = nn.ModuleList()
        # Conv stages built in a loop; state_dict keys are identical to the
        # unrolled original.
        channels = (self.n_channels, 128, 256, 512, 1024)
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            layers.append(
                nn.Sequential(
                    nn.Conv2d(c_in, c_out, 4, 2, padding=1),
                    nn.BatchNorm2d(c_out),
                    nn.ReLU(),
                )
            )

        self.layers = layers
        self.depth = len(layers)
        self.embedding = nn.Linear(1024, args.latent_dim)
        # The vMF concentration is a single scalar per sample.
        self.log_concentration = nn.Linear(1024, 1)

    def forward(self, x: torch.Tensor, output_layer_levels: List[int] = None):
        """Encode a batch of images.

        Args:
            x: input batch of shape ``(B, 1, 28, 28)``.
            output_layer_levels: 1-based levels of intermediate layers whose
                outputs should also be returned; ``-1`` denotes the last
                layer. If ``None``, only the final heads are returned.

        Returns:
            ModelOutput: embeddings under ``embedding`` and the **log** of
            the Von Mises-Fisher concentration under ``log_concentration``
            (docstring fixed: it previously said ``log_covariance``);
            requested intermediate outputs under ``embedding_layer_i``.
        """
        output = ModelOutput()

        max_depth = self.depth
        if output_layer_levels is not None:
            assert all(
                self.depth >= level > 0 or level == -1
                for level in output_layer_levels
            ), (
                # Fixed: missing separating space and trailing period in
                # the original message.
                f"Cannot output layer deeper than depth ({self.depth}). "
                f"Got ({output_layer_levels})."
            )
            if -1 in output_layer_levels:
                max_depth = self.depth
            else:
                max_depth = max(output_layer_levels)

        out = x
        for i in range(max_depth):
            out = self.layers[i](out)
            if output_layer_levels is not None and i + 1 in output_layer_levels:
                output[f"embedding_layer_{i+1}"] = out
            if i + 1 == self.depth:
                # Both heads read the same flattened (B, 1024) features.
                flat = out.reshape(x.shape[0], -1)
                output["embedding"] = self.embedding(flat)
                output["log_concentration"] = self.log_concentration(flat)
        return output
class Decoder_AE_MNIST(BaseDecoder):
    """A convolutional decoder suited for MNIST and Autoencoder-based models.

    A linear layer expands the latent code to a (1024, 4, 4) feature map,
    then three ConvTranspose2d stages upsample 4x4 -> 7x7 -> 14x14 -> 28x28,
    ending with a Sigmoid so pixel values lie in [0, 1].

    .. code-block::

        >>> from pythae.models.nn.benchmarks.mnist import Decoder_AE_MNIST
        >>> from pythae.models import VAEConfig
        >>> model_config = VAEConfig(input_dim=(1, 28, 28), latent_dim=16)
        >>> decoder = Decoder_AE_MNIST(model_config)

    .. note::
        This decoder is suitable for **all** models.
    """

    def __init__(self, args: BaseAEConfig):
        """Build the upsampling stack.

        Args:
            args: model configuration; only ``args.latent_dim`` is read.
                (Annotation fixed from ``dict`` -- the constructor accesses
                ``args.latent_dim``, i.e. it expects a config object.)
        """
        BaseDecoder.__init__(self)
        self.input_dim = (1, 28, 28)  # fixed: this architecture is MNIST-specific
        self.latent_dim = args.latent_dim
        self.n_channels = 1

        layers = nn.ModuleList()
        # Expand the latent code to a 1024-channel 4x4 feature map
        # (reshaped in forward()).
        layers.append(nn.Linear(args.latent_dim, 1024 * 4 * 4))
        layers.append(
            nn.Sequential(
                nn.ConvTranspose2d(1024, 512, 3, 2, padding=1),  # 4x4 -> 7x7
                nn.BatchNorm2d(512),
                nn.ReLU(),
            )
        )
        layers.append(
            nn.Sequential(
                # 7x7 -> 14x14 (output_padding completes the even size)
                nn.ConvTranspose2d(512, 256, 3, 2, padding=1, output_padding=1),
                nn.BatchNorm2d(256),
                nn.ReLU(),
            )
        )
        layers.append(
            nn.Sequential(
                # 14x14 -> 28x28; Sigmoid maps pixels into [0, 1]
                nn.ConvTranspose2d(
                    256, self.n_channels, 3, 2, padding=1, output_padding=1
                ),
                nn.Sigmoid(),
            )
        )

        self.layers = layers
        self.depth = len(layers)

    def forward(self, z: torch.Tensor, output_layer_levels: List[int] = None):
        """Decode a batch of latent codes.

        Args:
            z: latent batch of shape ``(B, latent_dim)``.
            output_layer_levels: 1-based levels of intermediate layers whose
                outputs should also be returned; ``-1`` denotes the last
                layer. If ``None``, only the reconstruction is returned.

        Returns:
            ModelOutput: reconstruction of shape ``(B, 1, 28, 28)`` under
            the key ``reconstruction``; requested intermediate outputs under
            ``reconstruction_layer_i`` (i = level).
        """
        output = ModelOutput()

        max_depth = self.depth
        if output_layer_levels is not None:
            assert all(
                self.depth >= level > 0 or level == -1
                for level in output_layer_levels
            ), (
                # Fixed: missing separating space and trailing period in
                # the original message.
                f"Cannot output layer deeper than depth ({self.depth}). "
                f"Got ({output_layer_levels})."
            )
            if -1 in output_layer_levels:
                max_depth = self.depth
            else:
                max_depth = max(output_layer_levels)

        out = z
        for i in range(max_depth):
            out = self.layers[i](out)
            if i == 0:
                # Turn the linear layer's flat output into a feature map.
                out = out.reshape(z.shape[0], 1024, 4, 4)
            if output_layer_levels is not None and i + 1 in output_layer_levels:
                output[f"reconstruction_layer_{i+1}"] = out
            if i + 1 == self.depth:
                output["reconstruction"] = out
        return output
class Discriminator_MNIST(BaseDiscriminator):
    """A convolutional discriminator suited for MNIST (e.g. for VAEGAN).

    Four Conv2d(kernel=4, stride=2, padding=1) stages without BatchNorm
    (1 -> 128 -> 256 -> 512 -> 1024 channels; the second stage uses Tanh,
    the others ReLU), followed by a Linear + Sigmoid head producing a
    scalar adversarial score in (0, 1).

    .. code-block::

        >>> from pythae.models.nn.benchmarks.mnist import Discriminator_MNIST
        >>> from pythae.models import VAEGANConfig
        >>> model_config = VAEGANConfig(input_dim=(1, 28, 28), latent_dim=16)
        >>> discriminator = Discriminator_MNIST(model_config)
    """

    def __init__(self, args: BaseAEConfig):
        """Build the discriminator stack.

        Args:
            args: model configuration; only ``args.latent_dim`` is read.
                (Annotation fixed from ``dict`` -- the constructor accesses
                ``args.latent_dim``, i.e. it expects a config object.)
        """
        BaseDiscriminator.__init__(self)
        self.input_dim = (1, 28, 28)  # fixed: this architecture is MNIST-specific
        self.latent_dim = args.latent_dim
        self.n_channels = 1

        layers = nn.ModuleList()
        # Conv stages (deliberately no BatchNorm, unlike the encoders);
        # the second stage uses Tanh. Module indices (state_dict keys)
        # match the unrolled original.
        for c_in, c_out, activation in (
            (self.n_channels, 128, nn.ReLU()),
            (128, 256, nn.Tanh()),
            (256, 512, nn.ReLU()),
            (512, 1024, nn.ReLU()),
        ):
            layers.append(
                nn.Sequential(nn.Conv2d(c_in, c_out, 4, 2, padding=1), activation)
            )
        # Final adversarial score in (0, 1).
        layers.append(nn.Sequential(nn.Linear(1024, 1), nn.Sigmoid()))

        self.layers = layers
        self.depth = len(layers)

    def forward(self, x: torch.Tensor, output_layer_levels: List[int] = None):
        """Score a batch of images.

        Args:
            x: input batch of shape ``(B, 1, 28, 28)``.
            output_layer_levels: 1-based levels of intermediate layers whose
                outputs should also be returned; ``-1`` denotes the last
                layer. If ``None``, only the final score is returned.

        Returns:
            ModelOutput: adversarial score under the key ``embedding``;
            requested intermediate outputs under ``embedding_layer_i``
            (i = level).
        """
        output = ModelOutput()

        max_depth = self.depth
        if output_layer_levels is not None:
            assert all(
                self.depth >= level > 0 or level == -1
                for level in output_layer_levels
            ), (
                f"Cannot output layer deeper than depth ({self.depth}). "
                f"Got ({output_layer_levels})."
            )
            if -1 in output_layer_levels:
                max_depth = self.depth
            else:
                max_depth = max(output_layer_levels)

        out = x
        for i in range(max_depth):
            if i == self.depth - 1:
                # Flatten before the final linear scoring stage
                # (was a hard-coded `i == 4`).
                out = out.reshape(x.shape[0], -1)
            out = self.layers[i](out)
            if output_layer_levels is not None and i + 1 in output_layer_levels:
                output[f"embedding_layer_{i+1}"] = out
            if i + 1 == self.depth:
                output["embedding"] = out
        return output
| 34.212598
| 126
| 0.517453
| 2,937
| 26,070
| 4.47157
| 0.06367
| 0.046067
| 0.064722
| 0.036549
| 0.871621
| 0.848169
| 0.834844
| 0.832864
| 0.82426
| 0.804005
| 0
| 0.056147
| 0.359187
| 26,070
| 761
| 127
| 34.257556
| 0.729977
| 0.517491
| 0
| 0.71
| 0
| 0
| 0.052951
| 0.020235
| 0
| 0
| 0
| 0
| 0.016667
| 1
| 0.033333
| false
| 0
| 0.026667
| 0
| 0.093333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
430b4abb8d2c61955efd0f1bd666beb57d58ea84
| 2,078
|
py
|
Python
|
subseq_cracker/test.py
|
jindrahelcl/puzzlehunt-tools
|
b36bc98e1b53282f7e1177d6d82631bc33e5deb8
|
[
"MIT"
] | null | null | null |
subseq_cracker/test.py
|
jindrahelcl/puzzlehunt-tools
|
b36bc98e1b53282f7e1177d6d82631bc33e5deb8
|
[
"MIT"
] | null | null | null |
subseq_cracker/test.py
|
jindrahelcl/puzzlehunt-tools
|
b36bc98e1b53282f7e1177d6d82631bc33e5deb8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from utils import *
import unittest
class TestSubseqDiscont(unittest.TestCase):
    """Tests for is_subseq_discont (subsequence test, gaps allowed)."""

    def test_is_subseq(self):
        self.assertTrue(is_subseq_discont([1, 3, 5], [1, 2, 3, 4, 5]))
        self.assertTrue(is_subseq_discont([1], [1, 2, 3, 4, 5]))
        self.assertTrue(is_subseq_discont([5], [1, 2, 3, 4, 5]))
        self.assertTrue(is_subseq_discont([1, 5], [1, 2, 3, 4, 5]))
        self.assertTrue(is_subseq_discont([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]))
        self.assertTrue(is_subseq_discont([], [1, 2, 3, 4, 5]))
        self.assertTrue(is_subseq_discont([], [1]))
        self.assertTrue(is_subseq_discont([1], [1]))
        self.assertTrue(is_subseq_discont([], []))

    def test_is_not_subseq(self):
        # Fixed: this first case previously called is_subseq (the contiguous
        # variant); in TestSubseqDiscont it must exercise is_subseq_discont.
        self.assertFalse(is_subseq_discont([1, 2, 3, 4, 5, 5], [1, 2, 3, 4, 5]))
        self.assertFalse(is_subseq_discont([1, 5, 4], [1, 2, 3, 4, 5]))
        self.assertFalse(is_subseq_discont([5, 1], [1, 2, 3, 4, 5]))
        self.assertFalse(is_subseq_discont([6], [1, 2, 3, 4, 5]))
        self.assertFalse(is_subseq_discont([1], []))
class TestSubseq(unittest.TestCase):
    """Tests for is_subseq (contiguous subsequence semantics)."""

    # (needle, haystack) pairs where needle IS a contiguous subsequence.
    POSITIVE_CASES = [
        ([1], [1, 2, 3, 4, 5]),
        ([5], [1, 2, 3, 4, 5]),
        ([1, 2], [1, 2, 3, 4, 5]),
        ([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]),
        ([], [1, 2, 3, 4, 5]),
        ([], [1]),
        ([1], [1]),
        ([], []),
    ]

    # (needle, haystack) pairs where needle is NOT a contiguous subsequence.
    NEGATIVE_CASES = [
        ([1, 5], [1, 2, 3, 4, 5]),
        ([1, 3, 5], [1, 2, 3, 4, 5]),
        ([1, 2, 3, 4, 5, 5], [1, 2, 3, 4, 5]),
        ([1, 5, 4], [1, 2, 3, 4, 5]),
        ([5, 1], [1, 2, 3, 4, 5]),
        ([6], [1, 2, 3, 4, 5]),
        ([1], []),
    ]

    def test_is_subseq(self):
        for needle, haystack in self.POSITIVE_CASES:
            self.assertTrue(is_subseq(needle, haystack))

    def test_is_not_subseq(self):
        for needle, haystack in self.NEGATIVE_CASES:
            self.assertFalse(is_subseq(needle, haystack))
# Run the full suite when this file is executed directly: `python test.py`.
if __name__ == '__main__':
unittest.main()
| 43.291667
| 76
| 0.568816
| 336
| 2,078
| 3.339286
| 0.089286
| 0.221034
| 0.066845
| 0.089127
| 0.907308
| 0.907308
| 0.834225
| 0.806595
| 0.806595
| 0.803922
| 0
| 0.101172
| 0.219923
| 2,078
| 48
| 77
| 43.291667
| 0.590993
| 0.010106
| 0
| 0.153846
| 0
| 0
| 0.003889
| 0
| 0
| 0
| 0
| 0
| 0.74359
| 1
| 0.102564
| false
| 0
| 0.051282
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4314dd74952b4f0020755b1d41287a061e867ff3
| 55,353
|
py
|
Python
|
sdk/python/pulumi_azure/cdn/endpoint.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/cdn/endpoint.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/cdn/endpoint.py
|
ScriptBox99/pulumi-azure
|
1b8c6d5479ccabc39094741eac25a8ca44c8833a
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['EndpointArgs', 'Endpoint']
@pulumi.input_type
class EndpointArgs:
def __init__(__self__, *,
origins: pulumi.Input[Sequence[pulumi.Input['EndpointOriginArgs']]],
profile_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
content_types_to_compresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
delivery_rules: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointDeliveryRuleArgs']]]] = None,
geo_filters: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointGeoFilterArgs']]]] = None,
global_delivery_rule: Optional[pulumi.Input['EndpointGlobalDeliveryRuleArgs']] = None,
is_compression_enabled: Optional[pulumi.Input[bool]] = None,
is_http_allowed: Optional[pulumi.Input[bool]] = None,
is_https_allowed: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
optimization_type: Optional[pulumi.Input[str]] = None,
origin_host_header: Optional[pulumi.Input[str]] = None,
origin_path: Optional[pulumi.Input[str]] = None,
probe_path: Optional[pulumi.Input[str]] = None,
querystring_caching_behaviour: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Endpoint resource.
:param pulumi.Input[Sequence[pulumi.Input['EndpointOriginArgs']]] origins: The set of origins of the CDN endpoint. When multiple origins exist, the first origin will be used as primary and rest will be used as failover options. Each `origin` block supports fields documented below.
:param pulumi.Input[str] profile_name: The CDN Profile to which to attach the CDN Endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the CDN Endpoint.
:param pulumi.Input[Sequence[pulumi.Input[str]]] content_types_to_compresses: An array of strings that indicates a content types on which compression will be applied. The value for the elements should be MIME types.
:param pulumi.Input[Sequence[pulumi.Input['EndpointDeliveryRuleArgs']]] delivery_rules: Rules for the rules engine. An endpoint can contain up until 4 of those rules that consist of conditions and actions. A `delivery_rule` blocks as defined below.
:param pulumi.Input[Sequence[pulumi.Input['EndpointGeoFilterArgs']]] geo_filters: A set of Geo Filters for this CDN Endpoint. Each `geo_filter` block supports fields documented below.
:param pulumi.Input['EndpointGlobalDeliveryRuleArgs'] global_delivery_rule: Actions that are valid for all resources regardless of any conditions. A `global_delivery_rule` block as defined below.
:param pulumi.Input[bool] is_compression_enabled: Indicates whether compression is to be enabled.
:param pulumi.Input[bool] is_http_allowed: Defaults to `true`.
:param pulumi.Input[bool] is_https_allowed: Defaults to `true`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created.
:param pulumi.Input[str] optimization_type: What types of optimization should this CDN Endpoint optimize for? Possible values include `DynamicSiteAcceleration`, `GeneralMediaStreaming`, `GeneralWebDelivery`, `LargeFileDownload` and `VideoOnDemandMediaStreaming`.
:param pulumi.Input[str] origin_host_header: The host header CDN provider will send along with content requests to origins.
:param pulumi.Input[str] origin_path: The path used at for origin requests.
:param pulumi.Input[str] probe_path: the path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the `origin_path`.
:param pulumi.Input[str] querystring_caching_behaviour: Sets query string caching behavior. Allowed values are `IgnoreQueryString`, `BypassCaching` and `UseQueryString`. `NotSet` value can be used for `Premium Verizon` CDN profile. Defaults to `IgnoreQueryString`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
# NOTE(review): this file is generated by the Pulumi Terraform Bridge
# (tfgen) per the file header -- prefer regenerating over hand-editing.
# Required arguments are stored unconditionally; optional arguments are
# stored only when supplied, so `None` means "omitted", not an explicit
# null value on the resource.
pulumi.set(__self__, "origins", origins)
pulumi.set(__self__, "profile_name", profile_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if content_types_to_compresses is not None:
pulumi.set(__self__, "content_types_to_compresses", content_types_to_compresses)
if delivery_rules is not None:
pulumi.set(__self__, "delivery_rules", delivery_rules)
if geo_filters is not None:
pulumi.set(__self__, "geo_filters", geo_filters)
if global_delivery_rule is not None:
pulumi.set(__self__, "global_delivery_rule", global_delivery_rule)
if is_compression_enabled is not None:
pulumi.set(__self__, "is_compression_enabled", is_compression_enabled)
if is_http_allowed is not None:
pulumi.set(__self__, "is_http_allowed", is_http_allowed)
if is_https_allowed is not None:
pulumi.set(__self__, "is_https_allowed", is_https_allowed)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if optimization_type is not None:
pulumi.set(__self__, "optimization_type", optimization_type)
if origin_host_header is not None:
pulumi.set(__self__, "origin_host_header", origin_host_header)
if origin_path is not None:
pulumi.set(__self__, "origin_path", origin_path)
if probe_path is not None:
pulumi.set(__self__, "probe_path", probe_path)
if querystring_caching_behaviour is not None:
pulumi.set(__self__, "querystring_caching_behaviour", querystring_caching_behaviour)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def origins(self) -> pulumi.Input[Sequence[pulumi.Input['EndpointOriginArgs']]]:
"""
The set of origins of the CDN endpoint. When multiple origins exist, the first origin will be used as primary and rest will be used as failover options. Each `origin` block supports fields documented below.
"""
return pulumi.get(self, "origins")
@origins.setter
def origins(self, value: pulumi.Input[Sequence[pulumi.Input['EndpointOriginArgs']]]):
pulumi.set(self, "origins", value)
@property
@pulumi.getter(name="profileName")
def profile_name(self) -> pulumi.Input[str]:
"""
The CDN Profile to which to attach the CDN Endpoint.
"""
return pulumi.get(self, "profile_name")
@profile_name.setter
def profile_name(self, value: pulumi.Input[str]):
pulumi.set(self, "profile_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
    """
    The name of the resource group in which the CDN Endpoint is created.
    """
    return pulumi.get(self, "resource_group_name")

@resource_group_name.setter
def resource_group_name(self, val: pulumi.Input[str]):
    pulumi.set(self, "resource_group_name", val)
@property
@pulumi.getter(name="contentTypesToCompresses")
def content_types_to_compresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    An array of content types on which compression will be applied.
    Elements should be MIME types.
    """
    return pulumi.get(self, "content_types_to_compresses")

@content_types_to_compresses.setter
def content_types_to_compresses(self, val: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "content_types_to_compresses", val)
@property
@pulumi.getter(name="deliveryRules")
def delivery_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EndpointDeliveryRuleArgs']]]]:
    """
    Rules for the rules engine. An endpoint may hold up to 4 such rules,
    each consisting of conditions and actions. A `delivery_rule` block as
    defined below.
    """
    return pulumi.get(self, "delivery_rules")

@delivery_rules.setter
def delivery_rules(self, val: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointDeliveryRuleArgs']]]]):
    pulumi.set(self, "delivery_rules", val)
@property
@pulumi.getter(name="geoFilters")
def geo_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EndpointGeoFilterArgs']]]]:
    """
    A set of Geo Filters for this CDN Endpoint. The fields supported by
    each `geo_filter` block are documented below.
    """
    return pulumi.get(self, "geo_filters")

@geo_filters.setter
def geo_filters(self, val: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointGeoFilterArgs']]]]):
    pulumi.set(self, "geo_filters", val)
@property
@pulumi.getter(name="globalDeliveryRule")
def global_delivery_rule(self) -> Optional[pulumi.Input['EndpointGlobalDeliveryRuleArgs']]:
    """
    Actions valid for all resources regardless of any conditions. A
    `global_delivery_rule` block as defined below.
    """
    return pulumi.get(self, "global_delivery_rule")

@global_delivery_rule.setter
def global_delivery_rule(self, val: Optional[pulumi.Input['EndpointGlobalDeliveryRuleArgs']]):
    pulumi.set(self, "global_delivery_rule", val)
@property
@pulumi.getter(name="isCompressionEnabled")
def is_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Indicates whether compression should be enabled.
    """
    return pulumi.get(self, "is_compression_enabled")

@is_compression_enabled.setter
def is_compression_enabled(self, val: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "is_compression_enabled", val)
@property
@pulumi.getter(name="isHttpAllowed")
def is_http_allowed(self) -> Optional[pulumi.Input[bool]]:
    """
    Defaults to `true`.
    """
    return pulumi.get(self, "is_http_allowed")

@is_http_allowed.setter
def is_http_allowed(self, val: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "is_http_allowed", val)
@property
@pulumi.getter(name="isHttpsAllowed")
def is_https_allowed(self) -> Optional[pulumi.Input[bool]]:
    """
    Defaults to `true`.
    """
    return pulumi.get(self, "is_https_allowed")

@is_https_allowed.setter
def is_https_allowed(self, val: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "is_https_allowed", val)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
    """
    The supported Azure location where the resource exists. Changing this
    forces a new resource to be created.
    """
    return pulumi.get(self, "location")

@location.setter
def location(self, val: Optional[pulumi.Input[str]]):
    pulumi.set(self, "location", val)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the origin. An arbitrary value that must be unique within
    the endpoint. Changing this forces a new resource to be created.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, val: Optional[pulumi.Input[str]]):
    pulumi.set(self, "name", val)
@property
@pulumi.getter(name="optimizationType")
def optimization_type(self) -> Optional[pulumi.Input[str]]:
    """
    The kind of optimization this CDN Endpoint targets. Possible values
    include `DynamicSiteAcceleration`, `GeneralMediaStreaming`,
    `GeneralWebDelivery`, `LargeFileDownload` and
    `VideoOnDemandMediaStreaming`.
    """
    return pulumi.get(self, "optimization_type")

@optimization_type.setter
def optimization_type(self, val: Optional[pulumi.Input[str]]):
    pulumi.set(self, "optimization_type", val)
@property
@pulumi.getter(name="originHostHeader")
def origin_host_header(self) -> Optional[pulumi.Input[str]]:
    """
    The host header the CDN provider sends along with content requests to
    origins.
    """
    return pulumi.get(self, "origin_host_header")

@origin_host_header.setter
def origin_host_header(self, val: Optional[pulumi.Input[str]]):
    pulumi.set(self, "origin_host_header", val)
@property
@pulumi.getter(name="originPath")
def origin_path(self) -> Optional[pulumi.Input[str]]:
    """
    The path used for origin requests.
    """
    return pulumi.get(self, "origin_path")

@origin_path.setter
def origin_path(self, val: Optional[pulumi.Input[str]]):
    pulumi.set(self, "origin_path", val)
@property
@pulumi.getter(name="probePath")
def probe_path(self) -> Optional[pulumi.Input[str]]:
    """
    The path to a file, hosted on the origin, that helps accelerate
    delivery of dynamic content and lets the CDN calculate optimal routes.
    Relative to `origin_path`.
    """
    return pulumi.get(self, "probe_path")

@probe_path.setter
def probe_path(self, val: Optional[pulumi.Input[str]]):
    pulumi.set(self, "probe_path", val)
@property
@pulumi.getter(name="querystringCachingBehaviour")
def querystring_caching_behaviour(self) -> Optional[pulumi.Input[str]]:
    """
    Query string caching behaviour. Allowed values are `IgnoreQueryString`,
    `BypassCaching` and `UseQueryString`; `NotSet` may be used for a
    `Premium Verizon` CDN profile. Defaults to `IgnoreQueryString`.
    """
    return pulumi.get(self, "querystring_caching_behaviour")

@querystring_caching_behaviour.setter
def querystring_caching_behaviour(self, val: Optional[pulumi.Input[str]]):
    pulumi.set(self, "querystring_caching_behaviour", val)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    A mapping of tags assigned to the resource.
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    pulumi.set(self, "tags", val)
@pulumi.input_type
class _EndpointState:
    def __init__(__self__, *,
                 content_types_to_compresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 delivery_rules: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointDeliveryRuleArgs']]]] = None,
                 geo_filters: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointGeoFilterArgs']]]] = None,
                 global_delivery_rule: Optional[pulumi.Input['EndpointGlobalDeliveryRuleArgs']] = None,
                 host_name: Optional[pulumi.Input[str]] = None,
                 is_compression_enabled: Optional[pulumi.Input[bool]] = None,
                 is_http_allowed: Optional[pulumi.Input[bool]] = None,
                 is_https_allowed: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 optimization_type: Optional[pulumi.Input[str]] = None,
                 origin_host_header: Optional[pulumi.Input[str]] = None,
                 origin_path: Optional[pulumi.Input[str]] = None,
                 origins: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointOriginArgs']]]] = None,
                 probe_path: Optional[pulumi.Input[str]] = None,
                 profile_name: Optional[pulumi.Input[str]] = None,
                 querystring_caching_behaviour: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering Endpoint resources.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] content_types_to_compresses: An array of strings that indicates a content types on which compression will be applied. The value for the elements should be MIME types.
        :param pulumi.Input[Sequence[pulumi.Input['EndpointDeliveryRuleArgs']]] delivery_rules: Rules for the rules engine. An endpoint can contain up until 4 of those rules that consist of conditions and actions. A `delivery_rule` blocks as defined below.
        :param pulumi.Input[Sequence[pulumi.Input['EndpointGeoFilterArgs']]] geo_filters: A set of Geo Filters for this CDN Endpoint. Each `geo_filter` block supports fields documented below.
        :param pulumi.Input['EndpointGlobalDeliveryRuleArgs'] global_delivery_rule: Actions that are valid for all resources regardless of any conditions. A `global_delivery_rule` block as defined below.
        :param pulumi.Input[str] host_name: A string that determines the hostname/IP address of the origin server. This string can be a domain name, Storage Account endpoint, Web App endpoint, IPv4 address or IPv6 address. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] is_compression_enabled: Indicates whether compression is to be enabled.
        :param pulumi.Input[bool] is_http_allowed: Defaults to `true`.
        :param pulumi.Input[bool] is_https_allowed: Defaults to `true`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created.
        :param pulumi.Input[str] optimization_type: What types of optimization should this CDN Endpoint optimize for? Possible values include `DynamicSiteAcceleration`, `GeneralMediaStreaming`, `GeneralWebDelivery`, `LargeFileDownload` and `VideoOnDemandMediaStreaming`.
        :param pulumi.Input[str] origin_host_header: The host header CDN provider will send along with content requests to origins.
        :param pulumi.Input[str] origin_path: The path used at for origin requests.
        :param pulumi.Input[Sequence[pulumi.Input['EndpointOriginArgs']]] origins: The set of origins of the CDN endpoint. When multiple origins exist, the first origin will be used as primary and rest will be used as failover options. Each `origin` block supports fields documented below.
        :param pulumi.Input[str] probe_path: the path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the `origin_path`.
        :param pulumi.Input[str] profile_name: The CDN Profile to which to attach the CDN Endpoint.
        :param pulumi.Input[str] querystring_caching_behaviour: Sets query string caching behavior. Allowed values are `IgnoreQueryString`, `BypassCaching` and `UseQueryString`. `NotSet` value can be used for `Premium Verizon` CDN profile. Defaults to `IgnoreQueryString`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the CDN Endpoint.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        # Record only the properties that were explicitly supplied; the
        # insertion order below matches the declared parameter order.
        supplied = {
            "content_types_to_compresses": content_types_to_compresses,
            "delivery_rules": delivery_rules,
            "geo_filters": geo_filters,
            "global_delivery_rule": global_delivery_rule,
            "host_name": host_name,
            "is_compression_enabled": is_compression_enabled,
            "is_http_allowed": is_http_allowed,
            "is_https_allowed": is_https_allowed,
            "location": location,
            "name": name,
            "optimization_type": optimization_type,
            "origin_host_header": origin_host_header,
            "origin_path": origin_path,
            "origins": origins,
            "probe_path": probe_path,
            "profile_name": profile_name,
            "querystring_caching_behaviour": querystring_caching_behaviour,
            "resource_group_name": resource_group_name,
            "tags": tags,
        }
        for prop, prop_value in supplied.items():
            if prop_value is not None:
                pulumi.set(__self__, prop, prop_value)

    @property
    @pulumi.getter(name="contentTypesToCompresses")
    def content_types_to_compresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        An array of content types on which compression will be applied.
        Elements should be MIME types.
        """
        return pulumi.get(self, "content_types_to_compresses")

    @content_types_to_compresses.setter
    def content_types_to_compresses(self, val: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "content_types_to_compresses", val)

    @property
    @pulumi.getter(name="deliveryRules")
    def delivery_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EndpointDeliveryRuleArgs']]]]:
        """
        Rules for the rules engine. An endpoint may hold up to 4 such rules,
        each consisting of conditions and actions. A `delivery_rule` block
        as defined below.
        """
        return pulumi.get(self, "delivery_rules")

    @delivery_rules.setter
    def delivery_rules(self, val: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointDeliveryRuleArgs']]]]):
        pulumi.set(self, "delivery_rules", val)

    @property
    @pulumi.getter(name="geoFilters")
    def geo_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EndpointGeoFilterArgs']]]]:
        """
        A set of Geo Filters for this CDN Endpoint. The fields supported by
        each `geo_filter` block are documented below.
        """
        return pulumi.get(self, "geo_filters")

    @geo_filters.setter
    def geo_filters(self, val: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointGeoFilterArgs']]]]):
        pulumi.set(self, "geo_filters", val)

    @property
    @pulumi.getter(name="globalDeliveryRule")
    def global_delivery_rule(self) -> Optional[pulumi.Input['EndpointGlobalDeliveryRuleArgs']]:
        """
        Actions valid for all resources regardless of any conditions. A
        `global_delivery_rule` block as defined below.
        """
        return pulumi.get(self, "global_delivery_rule")

    @global_delivery_rule.setter
    def global_delivery_rule(self, val: Optional[pulumi.Input['EndpointGlobalDeliveryRuleArgs']]):
        pulumi.set(self, "global_delivery_rule", val)

    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> Optional[pulumi.Input[str]]:
        """
        A string determining the hostname/IP address of the origin server:
        a domain name, Storage Account endpoint, Web App endpoint, IPv4
        address or IPv6 address. Changing this forces a new resource to be
        created.
        """
        return pulumi.get(self, "host_name")

    @host_name.setter
    def host_name(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_name", val)

    @property
    @pulumi.getter(name="isCompressionEnabled")
    def is_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether compression should be enabled.
        """
        return pulumi.get(self, "is_compression_enabled")

    @is_compression_enabled.setter
    def is_compression_enabled(self, val: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_compression_enabled", val)

    @property
    @pulumi.getter(name="isHttpAllowed")
    def is_http_allowed(self) -> Optional[pulumi.Input[bool]]:
        """
        Defaults to `true`.
        """
        return pulumi.get(self, "is_http_allowed")

    @is_http_allowed.setter
    def is_http_allowed(self, val: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_http_allowed", val)

    @property
    @pulumi.getter(name="isHttpsAllowed")
    def is_https_allowed(self) -> Optional[pulumi.Input[bool]]:
        """
        Defaults to `true`.
        """
        return pulumi.get(self, "is_https_allowed")

    @is_https_allowed.setter
    def is_https_allowed(self, val: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_https_allowed", val)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The supported Azure location where the resource exists. Changing
        this forces a new resource to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", val)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the origin. An arbitrary value that must be unique
        within the endpoint. Changing this forces a new resource to be
        created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", val)

    @property
    @pulumi.getter(name="optimizationType")
    def optimization_type(self) -> Optional[pulumi.Input[str]]:
        """
        The kind of optimization this CDN Endpoint targets. Possible values
        include `DynamicSiteAcceleration`, `GeneralMediaStreaming`,
        `GeneralWebDelivery`, `LargeFileDownload` and
        `VideoOnDemandMediaStreaming`.
        """
        return pulumi.get(self, "optimization_type")

    @optimization_type.setter
    def optimization_type(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "optimization_type", val)

    @property
    @pulumi.getter(name="originHostHeader")
    def origin_host_header(self) -> Optional[pulumi.Input[str]]:
        """
        The host header the CDN provider sends along with content requests
        to origins.
        """
        return pulumi.get(self, "origin_host_header")

    @origin_host_header.setter
    def origin_host_header(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "origin_host_header", val)

    @property
    @pulumi.getter(name="originPath")
    def origin_path(self) -> Optional[pulumi.Input[str]]:
        """
        The path used for origin requests.
        """
        return pulumi.get(self, "origin_path")

    @origin_path.setter
    def origin_path(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "origin_path", val)

    @property
    @pulumi.getter
    def origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EndpointOriginArgs']]]]:
        """
        The set of origins for this CDN endpoint. With multiple origins,
        the first is treated as the primary and the remainder act as
        failover options. Each `origin` block supports the fields
        documented below.
        """
        return pulumi.get(self, "origins")

    @origins.setter
    def origins(self, val: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointOriginArgs']]]]):
        pulumi.set(self, "origins", val)

    @property
    @pulumi.getter(name="probePath")
    def probe_path(self) -> Optional[pulumi.Input[str]]:
        """
        The path to a file, hosted on the origin, that helps accelerate
        delivery of dynamic content and lets the CDN calculate optimal
        routes. Relative to `origin_path`.
        """
        return pulumi.get(self, "probe_path")

    @probe_path.setter
    def probe_path(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "probe_path", val)

    @property
    @pulumi.getter(name="profileName")
    def profile_name(self) -> Optional[pulumi.Input[str]]:
        """
        The CDN Profile to which this CDN Endpoint is attached.
        """
        return pulumi.get(self, "profile_name")

    @profile_name.setter
    def profile_name(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "profile_name", val)

    @property
    @pulumi.getter(name="querystringCachingBehaviour")
    def querystring_caching_behaviour(self) -> Optional[pulumi.Input[str]]:
        """
        Query string caching behaviour. Allowed values are
        `IgnoreQueryString`, `BypassCaching` and `UseQueryString`; `NotSet`
        may be used for a `Premium Verizon` CDN profile. Defaults to
        `IgnoreQueryString`.
        """
        return pulumi.get(self, "querystring_caching_behaviour")

    @querystring_caching_behaviour.setter
    def querystring_caching_behaviour(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "querystring_caching_behaviour", val)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group in which the CDN Endpoint is
        created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, val: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", val)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags assigned to the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, val: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", val)
class Endpoint(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             content_types_to_compresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             delivery_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointDeliveryRuleArgs']]]]] = None,
             geo_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointGeoFilterArgs']]]]] = None,
             global_delivery_rule: Optional[pulumi.Input[pulumi.InputType['EndpointGlobalDeliveryRuleArgs']]] = None,
             is_compression_enabled: Optional[pulumi.Input[bool]] = None,
             is_http_allowed: Optional[pulumi.Input[bool]] = None,
             is_https_allowed: Optional[pulumi.Input[bool]] = None,
             location: Optional[pulumi.Input[str]] = None,
             name: Optional[pulumi.Input[str]] = None,
             optimization_type: Optional[pulumi.Input[str]] = None,
             origin_host_header: Optional[pulumi.Input[str]] = None,
             origin_path: Optional[pulumi.Input[str]] = None,
             origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointOriginArgs']]]]] = None,
             probe_path: Optional[pulumi.Input[str]] = None,
             profile_name: Optional[pulumi.Input[str]] = None,
             querystring_caching_behaviour: Optional[pulumi.Input[str]] = None,
             resource_group_name: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             __props__=None):
    """
    Create an Endpoint resource with the given unique name and keyword-argument properties.

    ## Import

    CDN Endpoints can be imported using the `resource id`, e.g.

    ```sh
    $ pulumi import azure:cdn/endpoint:Endpoint example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Cdn/profiles/myprofile1/endpoints/myendpoint1
    ```

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] content_types_to_compresses: An array of strings that indicates a content types on which compression will be applied. The value for the elements should be MIME types.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointDeliveryRuleArgs']]]] delivery_rules: Rules for the rules engine. An endpoint can contain up until 4 of those rules that consist of conditions and actions. A `delivery_rule` blocks as defined below.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointGeoFilterArgs']]]] geo_filters: A set of Geo Filters for this CDN Endpoint. Each `geo_filter` block supports fields documented below.
    :param pulumi.Input[pulumi.InputType['EndpointGlobalDeliveryRuleArgs']] global_delivery_rule: Actions that are valid for all resources regardless of any conditions. A `global_delivery_rule` block as defined below.
    :param pulumi.Input[bool] is_compression_enabled: Indicates whether compression is to be enabled.
    :param pulumi.Input[bool] is_http_allowed: Defaults to `true`.
    :param pulumi.Input[bool] is_https_allowed: Defaults to `true`.
    :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
    :param pulumi.Input[str] name: The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created.
    :param pulumi.Input[str] optimization_type: What types of optimization should this CDN Endpoint optimize for? Possible values include `DynamicSiteAcceleration`, `GeneralMediaStreaming`, `GeneralWebDelivery`, `LargeFileDownload` and `VideoOnDemandMediaStreaming`.
    :param pulumi.Input[str] origin_host_header: The host header CDN provider will send along with content requests to origins.
    :param pulumi.Input[str] origin_path: The path used at for origin requests.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointOriginArgs']]]] origins: The set of origins of the CDN endpoint. When multiple origins exist, the first origin will be used as primary and rest will be used as failover options. Each `origin` block supports fields documented below.
    :param pulumi.Input[str] probe_path: the path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the `origin_path`.
    :param pulumi.Input[str] profile_name: The CDN Profile to which to attach the CDN Endpoint.
    :param pulumi.Input[str] querystring_caching_behaviour: Sets query string caching behavior. Allowed values are `IgnoreQueryString`, `BypassCaching` and `UseQueryString`. `NotSet` value can be used for `Premium Verizon` CDN profile. Defaults to `IgnoreQueryString`.
    :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the CDN Endpoint.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
    """
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: EndpointArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Create an Endpoint resource with the given unique name, using a single args object for its properties.

    ## Import

    CDN Endpoints can be imported using the `resource id`, e.g.

    ```sh
    $ pulumi import azure:cdn/endpoint:Endpoint example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Cdn/profiles/myprofile1/endpoints/myendpoint1
    ```

    :param str resource_name: The name of the resource.
    :param EndpointArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Decide which overload the caller used and forward to the shared
    # implementation: either an EndpointArgs object or plain keyword args.
    resource_args, opts = _utilities.get_resource_args_opts(EndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             content_types_to_compresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             delivery_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointDeliveryRuleArgs']]]]] = None,
             geo_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointGeoFilterArgs']]]]] = None,
             global_delivery_rule: Optional[pulumi.Input[pulumi.InputType['EndpointGlobalDeliveryRuleArgs']]] = None,
             is_compression_enabled: Optional[pulumi.Input[bool]] = None,
             is_http_allowed: Optional[pulumi.Input[bool]] = None,
             is_https_allowed: Optional[pulumi.Input[bool]] = None,
             location: Optional[pulumi.Input[str]] = None,
             name: Optional[pulumi.Input[str]] = None,
             optimization_type: Optional[pulumi.Input[str]] = None,
             origin_host_header: Optional[pulumi.Input[str]] = None,
             origin_path: Optional[pulumi.Input[str]] = None,
             origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointOriginArgs']]]]] = None,
             probe_path: Optional[pulumi.Input[str]] = None,
             profile_name: Optional[pulumi.Input[str]] = None,
             querystring_caching_behaviour: Optional[pulumi.Input[str]] = None,
             resource_group_name: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             __props__=None):
    """Shared implementation backing both ``__init__`` overloads; registers the resource with the Pulumi engine."""
    # Normalize resource options and pin the provider version.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    # No opts.id means we are creating a new resource, so build the
    # property bag from the individual arguments.
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        # __new__ bypasses EndpointArgs.__init__ validation; required-property
        # checks are performed explicitly below.
        __props__ = EndpointArgs.__new__(EndpointArgs)

        __props__.__dict__["content_types_to_compresses"] = content_types_to_compresses
        __props__.__dict__["delivery_rules"] = delivery_rules
        __props__.__dict__["geo_filters"] = geo_filters
        __props__.__dict__["global_delivery_rule"] = global_delivery_rule
        __props__.__dict__["is_compression_enabled"] = is_compression_enabled
        __props__.__dict__["is_http_allowed"] = is_http_allowed
        __props__.__dict__["is_https_allowed"] = is_https_allowed
        __props__.__dict__["location"] = location
        __props__.__dict__["name"] = name
        __props__.__dict__["optimization_type"] = optimization_type
        __props__.__dict__["origin_host_header"] = origin_host_header
        __props__.__dict__["origin_path"] = origin_path
        # Required unless rehydrating an existing resource via opts.urn.
        if origins is None and not opts.urn:
            raise TypeError("Missing required property 'origins'")
        __props__.__dict__["origins"] = origins
        __props__.__dict__["probe_path"] = probe_path
        if profile_name is None and not opts.urn:
            raise TypeError("Missing required property 'profile_name'")
        __props__.__dict__["profile_name"] = profile_name
        __props__.__dict__["querystring_caching_behaviour"] = querystring_caching_behaviour
        if resource_group_name is None and not opts.urn:
            raise TypeError("Missing required property 'resource_group_name'")
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["tags"] = tags
        # host_name is an output-only property; the provider populates it.
        __props__.__dict__["host_name"] = None
    super(Endpoint, __self__).__init__(
        'azure:cdn/endpoint:Endpoint',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        content_types_to_compresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        delivery_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointDeliveryRuleArgs']]]]] = None,
        geo_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointGeoFilterArgs']]]]] = None,
        global_delivery_rule: Optional[pulumi.Input[pulumi.InputType['EndpointGlobalDeliveryRuleArgs']]] = None,
        host_name: Optional[pulumi.Input[str]] = None,
        is_compression_enabled: Optional[pulumi.Input[bool]] = None,
        is_http_allowed: Optional[pulumi.Input[bool]] = None,
        is_https_allowed: Optional[pulumi.Input[bool]] = None,
        location: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        optimization_type: Optional[pulumi.Input[str]] = None,
        origin_host_header: Optional[pulumi.Input[str]] = None,
        origin_path: Optional[pulumi.Input[str]] = None,
        origins: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EndpointOriginArgs']]]]] = None,
        probe_path: Optional[pulumi.Input[str]] = None,
        profile_name: Optional[pulumi.Input[str]] = None,
        querystring_caching_behaviour: Optional[pulumi.Input[str]] = None,
        resource_group_name: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Endpoint':
    """Look up an existing Endpoint resource by name and provider ID.

    Every optional keyword argument pre-populates the matching state field
    of the returned resource; any field left as ``None`` is read back from
    the provider. See the resource documentation for the meaning of each
    field (MIME compression types, delivery/geo rules, HTTP(S) toggles,
    origin configuration, profile and resource-group names, tags, ...).

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to look up.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Fold the provider ID into the options so the engine performs a read.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Bypass _EndpointState.__init__ and populate the raw state dict directly.
    state = _EndpointState.__new__(_EndpointState)
    state.__dict__.update({
        "content_types_to_compresses": content_types_to_compresses,
        "delivery_rules": delivery_rules,
        "geo_filters": geo_filters,
        "global_delivery_rule": global_delivery_rule,
        "host_name": host_name,
        "is_compression_enabled": is_compression_enabled,
        "is_http_allowed": is_http_allowed,
        "is_https_allowed": is_https_allowed,
        "location": location,
        "name": name,
        "optimization_type": optimization_type,
        "origin_host_header": origin_host_header,
        "origin_path": origin_path,
        "origins": origins,
        "probe_path": probe_path,
        "profile_name": profile_name,
        "querystring_caching_behaviour": querystring_caching_behaviour,
        "resource_group_name": resource_group_name,
        "tags": tags,
    })
    return Endpoint(resource_name, opts=opts, __props__=state)
@property
@pulumi.getter(name="contentTypesToCompresses")
def content_types_to_compresses(self) -> pulumi.Output[Sequence[str]]:
    """MIME content types on which the endpoint applies compression."""
    prop = pulumi.get(self, "content_types_to_compresses")
    return prop
@property
@pulumi.getter(name="deliveryRules")
def delivery_rules(self) -> pulumi.Output[Optional[Sequence['outputs.EndpointDeliveryRule']]]:
    """Rules-engine rules (conditions plus actions); an endpoint holds up to 4 `delivery_rule` blocks."""
    prop = pulumi.get(self, "delivery_rules")
    return prop
@property
@pulumi.getter(name="geoFilters")
def geo_filters(self) -> pulumi.Output[Optional[Sequence['outputs.EndpointGeoFilter']]]:
    """The `geo_filter` blocks configured for this CDN Endpoint."""
    prop = pulumi.get(self, "geo_filters")
    return prop
@property
@pulumi.getter(name="globalDeliveryRule")
def global_delivery_rule(self) -> pulumi.Output[Optional['outputs.EndpointGlobalDeliveryRule']]:
    """Actions applied to all resources unconditionally (`global_delivery_rule` block)."""
    prop = pulumi.get(self, "global_delivery_rule")
    return prop
@property
@pulumi.getter(name="hostName")
def host_name(self) -> pulumi.Output[str]:
    """Hostname/IP of the origin server: a domain name, Storage Account endpoint,
    Web App endpoint, IPv4 or IPv6 address. Changing this forces a new resource.
    """
    prop = pulumi.get(self, "host_name")
    return prop
@property
@pulumi.getter(name="isCompressionEnabled")
def is_compression_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether compression is enabled on this endpoint."""
    prop = pulumi.get(self, "is_compression_enabled")
    return prop
@property
@pulumi.getter(name="isHttpAllowed")
def is_http_allowed(self) -> pulumi.Output[Optional[bool]]:
    """Whether HTTP traffic is allowed. Defaults to `true`."""
    prop = pulumi.get(self, "is_http_allowed")
    return prop
@property
@pulumi.getter(name="isHttpsAllowed")
def is_https_allowed(self) -> pulumi.Output[Optional[bool]]:
    """Whether HTTPS traffic is allowed. Defaults to `true`."""
    prop = pulumi.get(self, "is_https_allowed")
    return prop
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    """Azure location of the resource. Changing this forces a new resource."""
    prop = pulumi.get(self, "location")
    return prop
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """Name of the origin — arbitrary but unique within the endpoint.
    Changing this forces a new resource to be created.
    """
    prop = pulumi.get(self, "name")
    return prop
@property
@pulumi.getter(name="optimizationType")
def optimization_type(self) -> pulumi.Output[Optional[str]]:
    """Optimization profile of the endpoint; one of `DynamicSiteAcceleration`,
    `GeneralMediaStreaming`, `GeneralWebDelivery`, `LargeFileDownload` or
    `VideoOnDemandMediaStreaming`.
    """
    prop = pulumi.get(self, "optimization_type")
    return prop
@property
@pulumi.getter(name="originHostHeader")
def origin_host_header(self) -> pulumi.Output[Optional[str]]:
    """Host header the CDN provider sends with content requests to origins."""
    prop = pulumi.get(self, "origin_host_header")
    return prop
@property
@pulumi.getter(name="originPath")
def origin_path(self) -> pulumi.Output[str]:
    """Path used for origin requests."""
    prop = pulumi.get(self, "origin_path")
    return prop
@property
@pulumi.getter
def origins(self) -> pulumi.Output[Sequence['outputs.EndpointOrigin']]:
    """Origins of the CDN endpoint; with multiple `origin` blocks, the first
    is primary and the rest serve as failover options.
    """
    prop = pulumi.get(self, "origins")
    return prop
@property
@pulumi.getter(name="probePath")
def probe_path(self) -> pulumi.Output[str]:
    """Path (relative to `origin_path`) of an origin-hosted file used to
    accelerate dynamic content delivery and compute optimal CDN routes.
    """
    prop = pulumi.get(self, "probe_path")
    return prop
@property
@pulumi.getter(name="profileName")
def profile_name(self) -> pulumi.Output[str]:
    """CDN Profile that the CDN Endpoint is attached to."""
    prop = pulumi.get(self, "profile_name")
    return prop
@property
@pulumi.getter(name="querystringCachingBehaviour")
def querystring_caching_behaviour(self) -> pulumi.Output[Optional[str]]:
    """Query-string caching behaviour: `IgnoreQueryString` (default),
    `BypassCaching` or `UseQueryString`; `NotSet` is valid for the
    `Premium Verizon` CDN profile.
    """
    prop = pulumi.get(self, "querystring_caching_behaviour")
    return prop
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
    """Resource group in which the CDN Endpoint is created."""
    prop = pulumi.get(self, "resource_group_name")
    return prop
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """Tags assigned to the resource."""
    prop = pulumi.get(self, "tags")
    return prop
| 55.799395
| 307
| 0.690423
| 6,734
| 55,353
| 5.472379
| 0.043808
| 0.092236
| 0.080948
| 0.047163
| 0.950313
| 0.942335
| 0.934276
| 0.925402
| 0.92296
| 0.913001
| 0
| 0.001972
| 0.212256
| 55,353
| 991
| 308
| 55.855701
| 0.843157
| 0.369483
| 0
| 0.843594
| 1
| 0
| 0.133675
| 0.051486
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166389
| false
| 0.001664
| 0.011647
| 0
| 0.27787
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4a78f02b39ccd30d3096631a68dba9414a8d18ad
| 7,937
|
py
|
Python
|
tests/components/cert_expiry/test_sensors.py
|
rchl/core
|
974e099e2a9527d38445531c6d9bc1461ba4c36f
|
[
"Apache-2.0"
] | 2
|
2021-05-19T19:05:08.000Z
|
2021-06-06T06:51:05.000Z
|
tests/components/cert_expiry/test_sensors.py
|
jrhubott/core
|
89fe232643134f283c041537e9f6841f47dc1c5e
|
[
"Apache-2.0"
] | 56
|
2020-08-03T07:30:54.000Z
|
2022-03-31T06:02:04.000Z
|
tests/components/cert_expiry/test_sensors.py
|
jrhubott/core
|
89fe232643134f283c041537e9f6841f47dc1c5e
|
[
"Apache-2.0"
] | 2
|
2021-07-14T20:22:04.000Z
|
2021-09-22T08:56:16.000Z
|
"""Tests for the Cert Expiry sensors."""
from datetime import timedelta
import socket
import ssl
from homeassistant.components.cert_expiry.const import DOMAIN
from homeassistant.config_entries import ENTRY_STATE_SETUP_RETRY
from homeassistant.const import CONF_HOST, CONF_PORT, STATE_UNAVAILABLE, STATE_UNKNOWN
from homeassistant.util.dt import utcnow
from .const import HOST, PORT
from .helpers import future_timestamp, static_datetime
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
@patch("homeassistant.util.dt.utcnow", return_value=static_datetime())
async def test_async_setup_entry(mock_now, hass):
    """Test async_setup_entry."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: HOST, CONF_PORT: PORT},
        unique_id=f"{HOST}:{PORT}",
    )
    timestamp = future_timestamp(100)
    # Stub the cert lookup so setup succeeds with a known expiry timestamp.
    with patch(
        "homeassistant.components.cert_expiry.get_cert_expiry_timestamp",
        return_value=timestamp,
    ):
        entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # The sensor reports the stubbed expiry, no error, and a valid cert.
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is not None
    assert state.state != STATE_UNAVAILABLE
    assert state.state == timestamp.isoformat()
    assert state.attributes.get("error") == "None"
    assert state.attributes.get("is_valid")
async def test_async_setup_entry_bad_cert(hass):
    """Test async_setup_entry with a bad/expired cert."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: HOST, CONF_PORT: PORT},
        unique_id=f"{HOST}:{PORT}",
    )
    # An SSLError from get_cert means the cert is invalid; setup still
    # succeeds and the failure is surfaced through sensor attributes.
    with patch(
        "homeassistant.components.cert_expiry.helper.get_cert",
        side_effect=ssl.SSLError("some error"),
    ):
        entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is not None
    assert state.state != STATE_UNAVAILABLE
    assert state.attributes.get("error") == "some error"
    assert not state.attributes.get("is_valid")
async def test_async_setup_entry_host_unavailable(hass):
    """Test async_setup_entry when host is unavailable."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: HOST, CONF_PORT: PORT},
        unique_id=f"{HOST}:{PORT}",
    )
    # A DNS failure (gaierror) during setup puts the entry into setup-retry.
    with patch(
        "homeassistant.components.cert_expiry.helper.get_cert",
        side_effect=socket.gaierror,
    ):
        entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(entry.entry_id) is False
        await hass.async_block_till_done()
    assert entry.state == ENTRY_STATE_SETUP_RETRY
    # Advance time past the retry interval while the host keeps failing:
    # no sensor entity may be created.
    next_update = utcnow() + timedelta(seconds=45)
    async_fire_time_changed(hass, next_update)
    with patch(
        "homeassistant.components.cert_expiry.helper.get_cert",
        side_effect=socket.gaierror,
    ):
        await hass.async_block_till_done()
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is None
async def test_update_sensor(hass):
    """Test async_update for sensor."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: HOST, CONF_PORT: PORT},
        unique_id=f"{HOST}:{PORT}",
    )
    starting_time = static_datetime()
    timestamp = future_timestamp(100)
    # Initial setup at a pinned "now" with a stubbed expiry timestamp.
    with patch("homeassistant.util.dt.utcnow", return_value=starting_time), patch(
        "homeassistant.components.cert_expiry.get_cert_expiry_timestamp",
        return_value=timestamp,
    ):
        entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is not None
    assert state.state != STATE_UNAVAILABLE
    assert state.state == timestamp.isoformat()
    assert state.attributes.get("error") == "None"
    assert state.attributes.get("is_valid")
    # 24 hours later the sensor refreshes and reports the same (unchanged) expiry.
    next_update = starting_time + timedelta(hours=24)
    with patch("homeassistant.util.dt.utcnow", return_value=next_update), patch(
        "homeassistant.components.cert_expiry.get_cert_expiry_timestamp",
        return_value=timestamp,
    ):
        async_fire_time_changed(hass, utcnow() + timedelta(hours=24))
        await hass.async_block_till_done()
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is not None
    assert state.state != STATE_UNAVAILABLE
    assert state.state == timestamp.isoformat()
    assert state.attributes.get("error") == "None"
    assert state.attributes.get("is_valid")
async def test_update_sensor_network_errors(hass):
    """Test async_update for sensor."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: HOST, CONF_PORT: PORT},
        unique_id=f"{HOST}:{PORT}",
    )
    starting_time = static_datetime()
    timestamp = future_timestamp(100)
    # Initial, successful setup at a pinned "now".
    with patch("homeassistant.util.dt.utcnow", return_value=starting_time), patch(
        "homeassistant.components.cert_expiry.get_cert_expiry_timestamp",
        return_value=timestamp,
    ):
        entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is not None
    assert state.state != STATE_UNAVAILABLE
    assert state.state == timestamp.isoformat()
    assert state.attributes.get("error") == "None"
    assert state.attributes.get("is_valid")
    # +24h: DNS failure during refresh -> sensor becomes unavailable.
    next_update = starting_time + timedelta(hours=24)
    with patch("homeassistant.util.dt.utcnow", return_value=next_update), patch(
        "homeassistant.components.cert_expiry.helper.get_cert",
        side_effect=socket.gaierror,
    ):
        async_fire_time_changed(hass, utcnow() + timedelta(hours=24))
        await hass.async_block_till_done()
    next_update = starting_time + timedelta(hours=48)
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state.state == STATE_UNAVAILABLE
    # +48h: connectivity restored -> sensor recovers with the expiry value.
    with patch("homeassistant.util.dt.utcnow", return_value=next_update), patch(
        "homeassistant.components.cert_expiry.get_cert_expiry_timestamp",
        return_value=timestamp,
    ):
        async_fire_time_changed(hass, utcnow() + timedelta(hours=48))
        await hass.async_block_till_done()
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is not None
    assert state.state != STATE_UNAVAILABLE
    assert state.state == timestamp.isoformat()
    assert state.attributes.get("error") == "None"
    assert state.attributes.get("is_valid")
    # +72h: SSL error -> state unknown, error attribute exposes the message.
    next_update = starting_time + timedelta(hours=72)
    with patch("homeassistant.util.dt.utcnow", return_value=next_update), patch(
        "homeassistant.components.cert_expiry.helper.get_cert",
        side_effect=ssl.SSLError("something bad"),
    ):
        async_fire_time_changed(hass, utcnow() + timedelta(hours=72))
        await hass.async_block_till_done()
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state is not None
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get("error") == "something bad"
    assert not state.attributes.get("is_valid")
    # +96h: unexpected exception -> sensor unavailable again.
    next_update = starting_time + timedelta(hours=96)
    with patch("homeassistant.util.dt.utcnow", return_value=next_update), patch(
        "homeassistant.components.cert_expiry.helper.get_cert", side_effect=Exception()
    ):
        async_fire_time_changed(hass, utcnow() + timedelta(hours=96))
        await hass.async_block_till_done()
    state = hass.states.get("sensor.cert_expiry_timestamp_example_com")
    assert state.state == STATE_UNAVAILABLE
| 36.242009
| 87
| 0.714124
| 1,006
| 7,937
| 5.364811
| 0.097416
| 0.069298
| 0.052807
| 0.073374
| 0.869557
| 0.836946
| 0.820271
| 0.803595
| 0.787289
| 0.787289
| 0
| 0.004775
| 0.182059
| 7,937
| 218
| 88
| 36.408257
| 0.826556
| 0.004284
| 0
| 0.770588
| 0
| 0
| 0.190872
| 0.162008
| 0
| 0
| 0
| 0
| 0.247059
| 1
| 0
| false
| 0
| 0.064706
| 0
| 0.064706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a7f1d42396159a0acbe5129d01d6150e79f1732
| 4,791
|
py
|
Python
|
tensorflow/et0/test_chinese_map/generate_shp_from_txt.py
|
waterfallhyb/portfolio-examples
|
46ff40f593c88979495f0987f857d056f8addaf7
|
[
"MIT"
] | 28
|
2020-09-26T21:36:32.000Z
|
2021-07-04T03:40:45.000Z
|
tensorflow/et0/test_chinese_map/generate_shp_from_txt.py
|
waterfallhyb/portfolio-examples
|
46ff40f593c88979495f0987f857d056f8addaf7
|
[
"MIT"
] | null | null | null |
tensorflow/et0/test_chinese_map/generate_shp_from_txt.py
|
waterfallhyb/portfolio-examples
|
46ff40f593c88979495f0987f857d056f8addaf7
|
[
"MIT"
] | 1
|
2020-09-28T02:07:02.000Z
|
2020-09-28T02:07:02.000Z
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import numpy as np
def process_prs(file):
    """Convert a raw PRS (station pressure) text file into tab-separated records.

    Each whitespace-split input line is expected to carry at least:
      [0] station name, [1] latitude as DDMM, [2] longitude as DDMM,
      [3] elevation in tenths (values >= 100000 carry a +100000 flag),
      [7] pressure in tenths (values >= 20000 carry a +20000 flag).

    Returns a list of newline-terminated strings:
    ``name<TAB>lon<TAB>lat<TAB>elevation<TAB>pressure``.
    """
    prs_list = []
    # `with` closes the handle deterministically (the original leaked it).
    with open(file) as f:
        for file_line in f:
            parts = file_line.split()
            if not parts:
                continue  # tolerate blank lines
            name = parts[0]
            x_temp = float(parts[2])
            y_temp = float(parts[1])
            # DDMM -> decimal degrees: whole degrees + minutes / 60.
            x = np.floor(x_temp / 100) + ((x_temp / 100 - np.floor(x_temp / 100)) * 100) / 60
            y = np.floor(y_temp / 100) + ((y_temp / 100 - np.floor(y_temp / 100)) * 100) / 60
            raw_elev = float(parts[3])
            raw_prs = float(parts[7])
            # Strip the encoding offsets, then scale from tenths.
            # (The original's nested if/else applied the two flags
            # independently anyway; this flattening is equivalent.)
            attr_3 = (raw_elev - 100000 if raw_elev >= 100000 else raw_elev) / 10
            attr_4 = (raw_prs - 20000 if raw_prs >= 20000 else raw_prs) / 10
            prs_list.append("\t".join([name, str(x), str(y), str(attr_3), str(attr_4)]) + "\n")
    return prs_list
def process_rhu(file):
    """Convert a raw RHU (relative humidity) text file into tab-separated records.

    Lines whose field [7] is >= 32760 (missing-value sentinel) are skipped.
    Fields used: [0] name, [1] latitude DDMM, [2] longitude DDMM,
    [3] elevation in tenths (>= 100000 carries a +100000 flag),
    [7] humidity in hundredths.

    Returns a list of newline-terminated
    ``name<TAB>lon<TAB>lat<TAB>elevation<TAB>humidity`` strings.
    """
    rhu_list = []
    # `with` closes the handle deterministically (the original leaked it).
    with open(file) as f:
        for file_line in f:
            parts = file_line.split()
            if not parts:
                continue  # tolerate blank lines
            raw_val = float(parts[7])
            if raw_val >= 32760:
                continue  # missing-value sentinel: skip station
            name = parts[0]
            x_temp = float(parts[2])
            y_temp = float(parts[1])
            # DDMM -> decimal degrees: whole degrees + minutes / 60.
            x = np.floor(x_temp / 100) + ((x_temp / 100 - np.floor(x_temp / 100)) * 100) / 60
            y = np.floor(y_temp / 100) + ((y_temp / 100 - np.floor(y_temp / 100)) * 100) / 60
            raw_elev = float(parts[3])
            # BUG FIX: the original evaluated `float(file_split[3] - 100000)`,
            # subtracting an int from a *string*, which raised TypeError for
            # every station carrying the +100000 elevation flag.
            attr_3 = (raw_elev - 100000 if raw_elev >= 100000 else raw_elev) / 10
            attr_4 = raw_val / 100
            rhu_list.append("\t".join([name, str(x), str(y), str(attr_3), str(attr_4)]) + "\n")
    return rhu_list
def process_tem(file):
    """Convert a raw TEM (temperature) text file into tab-separated records.

    A line is skipped when any of fields [7], [8], [9] is >= 32760
    (missing-value sentinel). Temperatures are stored in tenths and are
    corrected with a 0.65-per-100-elevation-units lapse term, exactly as
    the original code did.

    Returns a list of newline-terminated
    ``name<TAB>lon<TAB>lat<TAB>elevation<TAB>t1<TAB>t2<TAB>t3`` strings.
    """
    tem_list = []
    # `with` closes the handle deterministically (the original leaked it).
    with open(file) as f:
        for file_line in f:
            parts = file_line.split()
            if not parts:
                continue  # tolerate blank lines
            raw_t = (float(parts[7]), float(parts[8]), float(parts[9]))
            if any(t >= 32760 for t in raw_t):
                continue  # missing-value sentinel in any temperature field
            name = parts[0]
            x_temp = float(parts[2])
            y_temp = float(parts[1])
            # DDMM -> decimal degrees: whole degrees + minutes / 60.
            x = np.floor(x_temp / 100) + ((x_temp / 100 - np.floor(x_temp / 100)) * 100) / 60
            y = np.floor(y_temp / 100) + ((y_temp / 100 - np.floor(y_temp / 100)) * 100) / 60
            raw_elev = float(parts[3])
            attr_3 = (raw_elev - 100000 if raw_elev >= 100000 else raw_elev) / 10
            # Lapse-rate correction term, hoisted out of the three sums
            # (same floating-point operations as the original).
            correction = (attr_3 / 100) * 0.65
            attr_4 = raw_t[0] / 10 + correction
            attr_5 = raw_t[1] / 10 + correction
            attr_6 = raw_t[2] / 10 + correction
            fields = [name, str(x), str(y), str(attr_3), str(attr_4), str(attr_5), str(attr_6)]
            tem_list.append("\t".join(fields) + "\n")
    return tem_list
def process_win(file):
    """Convert a raw WIN (wind speed) text file into tab-separated records.

    Lines whose field [7] is >= 32760 (missing-value sentinel) are skipped.
    Wind values >= 1000 carry a +1000 flag; elevations >= 100000 carry a
    +100000 flag. Both are stored in tenths.

    Returns a list of newline-terminated
    ``name<TAB>lon<TAB>lat<TAB>elevation<TAB>wind`` strings.
    """
    win_list = []
    # `with` closes the handle deterministically (the original leaked it).
    with open(file) as f:
        for file_line in f:
            parts = file_line.split()
            if not parts:
                continue  # tolerate blank lines
            raw_win = float(parts[7])
            if raw_win >= 32760:
                continue  # missing-value sentinel: skip station
            name = parts[0]
            x_temp = float(parts[2])
            y_temp = float(parts[1])
            # DDMM -> decimal degrees: whole degrees + minutes / 60.
            x = np.floor(x_temp / 100) + ((x_temp / 100 - np.floor(x_temp / 100)) * 100) / 60
            y = np.floor(y_temp / 100) + ((y_temp / 100 - np.floor(y_temp / 100)) * 100) / 60
            raw_elev = float(parts[3])
            # Strip encoding offsets, then scale from tenths (the original's
            # nested if/else applied the two flags independently anyway).
            attr_3 = (raw_elev - 100000 if raw_elev >= 100000 else raw_elev) / 10
            attr_4 = (raw_win - 1000 if raw_win >= 1000 else raw_win) / 10
            win_list.append("\t".join([name, str(x), str(y), str(attr_3), str(attr_4)]) + "\n")
    return win_list
| 42.39823
| 146
| 0.504279
| 690
| 4,791
| 3.275362
| 0.089855
| 0.215044
| 0.284956
| 0.119469
| 0.84646
| 0.825221
| 0.819469
| 0.80531
| 0.802655
| 0.79469
| 0
| 0.127035
| 0.346066
| 4,791
| 112
| 147
| 42.776786
| 0.594319
| 0.011271
| 0
| 0.752475
| 0
| 0
| 0.009293
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039604
| false
| 0
| 0.009901
| 0
| 0.089109
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4aadc6d21d045696cdcd9adea4e18d4c25a1424f
| 15,651
|
py
|
Python
|
test/save_load.py
|
yamada-github-account/cpprb
|
0e37e8deef95156c5b0ad8e5bce1577f612e90fa
|
[
"MIT"
] | 13
|
2019-07-28T14:51:56.000Z
|
2020-06-22T04:09:06.000Z
|
test/save_load.py
|
yamada-github-account/cpprb
|
0e37e8deef95156c5b0ad8e5bce1577f612e90fa
|
[
"MIT"
] | 1
|
2019-12-01T23:08:48.000Z
|
2019-12-03T04:16:42.000Z
|
test/save_load.py
|
yamada-github-account/cpprb
|
0e37e8deef95156c5b0ad8e5bce1577f612e90fa
|
[
"MIT"
] | 2
|
2020-04-22T23:10:42.000Z
|
2020-07-28T14:23:55.000Z
|
import os
import unittest
import numpy as np
from cpprb import ReplayBuffer, PrioritizedReplayBuffer
def v(num: int, fname: str):
    """Return the path of *fname* inside the versioned data directory ``v{num}``
    that sits next to this test module."""
    versioned_dir = f"v{num}"
    here = os.path.dirname(__file__)
    return os.path.join(here, versioned_dir, fname)
class TestReplayBuffer(unittest.TestCase):
def test_basic(self):
    """
    Basic Test Case

    The loaded buffer must hold the same transitions as the saved one.
    """
    buffer_size = 4
    env_dict = {"a": {}}
    rb1 = ReplayBuffer(buffer_size, env_dict)
    rb2 = ReplayBuffer(buffer_size, env_dict)
    rb3 = ReplayBuffer(buffer_size, env_dict)  # loads the checked-in v1 file
    a = [1, 2, 3, 4]
    rb1.add(a=a)
    fname = "basic.npz"
    rb1.save_transitions(fname)
    rb2.load_transitions(fname)        # round-trip through a fresh file
    rb3.load_transitions(v(1, fname))  # backward compatibility: v1 format
    t1 = rb1.get_all_transitions()
    t2 = rb2.get_all_transitions()
    t3 = rb3.get_all_transitions()
    np.testing.assert_allclose(t1["a"], t2["a"])
    np.testing.assert_allclose(t1["a"], t3["a"])
def test_smaller_buffer(self):
    """
    Load to smaller buffer

    The loaded buffer stores only the last buffer_size transitions.
    """
    buffer_size1 = 10
    buffer_size2 = 4
    env_dict = {"a": {}}
    rb1 = ReplayBuffer(buffer_size1, env_dict)
    rb2 = ReplayBuffer(buffer_size2, env_dict)
    rb3 = ReplayBuffer(buffer_size2, env_dict)  # loads the checked-in v1 file
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    # BUG FIX: the transitions were never stored before saving, so the
    # truncation assertion below compared two *empty* buffers.
    rb1.add(a=a)
    fname = "smaller.npz"
    rb1.save_transitions(fname)
    rb2.load_transitions(fname)
    rb3.load_transitions(v(1, fname))
    t1 = rb1.get_all_transitions()
    t2 = rb2.get_all_transitions()
    t3 = rb3.get_all_transitions()
    np.testing.assert_allclose(t1["a"][-buffer_size2:], t2["a"])
    # Also verify the v1-format file, mirroring the sibling tests
    # (t3 was loaded but never asserted before).
    np.testing.assert_allclose(t1["a"][-buffer_size2:], t3["a"])
def test_load_to_filled_buffer(self):
    """
    Load to already filled buffer

    Loaded transitions are appended after the existing ones.
    """
    buffer_size1 = 10
    buffer_size2 = 10
    env_dict = {"a": {}}
    rb1 = ReplayBuffer(buffer_size1, env_dict)
    rb2 = ReplayBuffer(buffer_size2, env_dict)
    rb3 = ReplayBuffer(buffer_size2, env_dict)
    a = [1, 2, 3, 4]
    b = [5, 6]
    rb1.add(a=a)
    rb2.add(a=b)  # rb2/rb3 already hold `b` before loading
    rb3.add(a=b)
    fname = "filled.npz"
    rb1.save_transitions(fname)
    rb2.load_transitions(fname)
    rb3.load_transitions(v(1, fname))  # v1 on-disk format
    t1 = rb1.get_all_transitions()
    t2 = rb2.get_all_transitions()
    t3 = rb3.get_all_transitions()
    # Loaded data must follow the pre-existing `b` transitions.
    np.testing.assert_allclose(t1["a"], t2["a"][len(b):])
    np.testing.assert_allclose(t1["a"], t3["a"][len(b):])
def test_load_Nstep(self):
    """
    Load Nstep transitions
    """
    buffer_size = 10
    env_dict = {"done": {}}
    Nstep = {"size": 3, "gamma": 0.99}
    rb1 = ReplayBuffer(buffer_size, env_dict, Nstep=Nstep)
    rb2 = ReplayBuffer(buffer_size, env_dict, Nstep=Nstep)
    rb3 = ReplayBuffer(buffer_size, env_dict, Nstep=Nstep)
    d = [0, 0, 0, 0, 1]
    rb1.add(done=d)
    rb1.on_episode_end()  # flush the pending N-step transitions before saving
    fname = "Nstep.npz"
    rb1.save_transitions(fname)
    rb2.load_transitions(fname)
    rb3.load_transitions(v(1, fname))  # v1 on-disk format
    t1 = rb1.get_all_transitions()
    t2 = rb2.get_all_transitions()
    t3 = rb3.get_all_transitions()
    np.testing.assert_allclose(t1["done"], t2["done"])
    np.testing.assert_allclose(t1["done"], t3["done"])
def test_Nstep_incompatibility(self):
    """
    Raise ValueError when Nstep incompatibility
    """
    buffer_size = 10
    env_dict = {"done": {}}
    Nstep = {"size": 3, "gamma": 0.99}
    rb1 = ReplayBuffer(buffer_size, env_dict, Nstep=Nstep)
    rb2 = ReplayBuffer(buffer_size, env_dict)  # non-Nstep: incompatible target
    rb3 = ReplayBuffer(buffer_size, env_dict)
    d = [0, 0, 0, 0, 1]
    rb1.add(done=d)
    rb1.on_episode_end()
    fname = "Nstep_raise.npz"
    rb1.save_transitions(fname)
    # Loading Nstep data into a plain buffer must fail loudly.
    with self.assertRaises(ValueError):
        rb2.load_transitions(fname)
    with self.assertRaises(ValueError):
        rb3.load_transitions(v(1, fname))
def test_next_of(self):
    """
    Load next_of transitions with safe mode

    For safe mode, next_of is not necessary at loaded buffer.
    """
    buffer_size = 10
    env_dict1 = {"a": {}}
    env_dict2 = {"a": {}, "next_a": {}}
    rb1 = ReplayBuffer(buffer_size, env_dict1, next_of="a")
    rb2 = ReplayBuffer(buffer_size, env_dict2)  # plain buffer, explicit next_a
    rb3 = ReplayBuffer(buffer_size, env_dict2)
    a = [1, 2, 3, 4, 5, 6]
    rb1.add(a=a[:-1], next_a=a[1:])
    fname = "next_of.npz"
    rb1.save_transitions(fname)  # safe (default): next_a saved explicitly
    rb2.load_transitions(fname)
    rb3.load_transitions(v(1, fname))  # v1 on-disk format
    t1 = rb1.get_all_transitions()
    t2 = rb2.get_all_transitions()
    t3 = rb3.get_all_transitions()
    np.testing.assert_allclose(t1["a"], t2["a"])
    np.testing.assert_allclose(t1["next_a"], t2["next_a"])
    np.testing.assert_allclose(t1["a"], t3["a"])
    np.testing.assert_allclose(t1["next_a"], t3["next_a"])
def test_unsafe_next_of(self):
    """
    Load next_of transitions with unsafe mode
    """
    buffer_size = 10
    env_dict = {"a": {}}
    rb1 = ReplayBuffer(buffer_size, env_dict, next_of="a")
    rb2 = ReplayBuffer(buffer_size, env_dict, next_of="a")
    rb3 = ReplayBuffer(buffer_size, env_dict, next_of="a")
    a = [1, 2, 3, 4, 5, 6]
    rb1.add(a=a[:-1], next_a=a[1:])
    fname = "unsafe_next_of.npz"
    rb1.save_transitions(fname, safe=False)  # keep the compressed next_of form
    rb2.load_transitions(fname)
    rb3.load_transitions(v(1, fname))  # v1 on-disk format
    t1 = rb1.get_all_transitions()
    t2 = rb2.get_all_transitions()
    t3 = rb3.get_all_transitions()
    np.testing.assert_allclose(t1["a"], t2["a"])
    np.testing.assert_allclose(t1["next_a"], t2["next_a"])
    np.testing.assert_allclose(t1["a"], t3["a"])
    np.testing.assert_allclose(t1["next_a"], t3["next_a"])
def test_unsafe_next_of_already_filled(self):
    """
    Load unsafe next_of transitions with already filled buffer
    """
    buffer_size = 10
    env_dict = {"a": {}}
    rb1 = ReplayBuffer(buffer_size, env_dict, next_of="a")
    rb2 = ReplayBuffer(buffer_size, env_dict, next_of="a")
    rb3 = ReplayBuffer(buffer_size, env_dict, next_of="a")
    a = [1, 2, 3, 4, 5, 6]
    b = [7, 8, 9]
    rb1.add(a=a[:-1], next_a=a[1:])
    rb2.add(a=b[:-1], next_a=b[1:])  # rb2/rb3 pre-filled with `b`
    rb3.add(a=b[:-1], next_a=b[1:])
    fname = "unsafe_next_of_already.npz"
    rb1.save_transitions(fname, safe=False)
    rb2.load_transitions(fname)
    rb3.load_transitions(v(1, fname))  # v1 on-disk format
    # Stored size grows by the loaded count (len(b)-1 transitions pre-existed).
    self.assertEqual(rb1.get_stored_size()+len(b)-1, rb2.get_stored_size())
    self.assertEqual(rb1.get_stored_size()+len(b)-1, rb3.get_stored_size())
    t1 = rb1.get_all_transitions()
    t2 = rb2.get_all_transitions()
    t3 = rb3.get_all_transitions()
    # Loaded data follows the pre-existing transitions.
    np.testing.assert_allclose(t1["a"], t2["a"][len(b)-1:])
    np.testing.assert_allclose(t1["next_a"], t2["next_a"][len(b)-1:])
    np.testing.assert_allclose(t1["a"], t3["a"][len(b)-1:])
    np.testing.assert_allclose(t1["next_a"], t3["next_a"][len(b)-1:])
def test_incompatible_unsafe_next_of(self):
    """
    Load incompatible next_of transitions with unsafe mode
    """
    buffer_size = 10
    env_dict1 = {"a": {}}
    env_dict2 = {"a": {}, "next_a": {}}
    rb1 = ReplayBuffer(buffer_size, env_dict1, next_of="a")
    rb2 = ReplayBuffer(buffer_size, env_dict2)  # no next_of: different layout
    rb3 = ReplayBuffer(buffer_size, env_dict2)
    a = [1, 2, 3, 4, 5, 6]
    rb1.add(a=a[:-1], next_a=a[1:])
    fname = "unsafe_incompatible_next_of.npz"
    rb1.save_transitions(fname, safe=False)
    rb2.load_transitions(fname)
    rb3.load_transitions(v(1, fname))  # v1 on-disk format
    t1 = rb1.get_all_transitions()
    t2 = rb2.get_all_transitions()
    t3 = rb3.get_all_transitions()
    np.testing.assert_allclose(t1["a"], t2["a"])
    np.testing.assert_allclose(t1["next_a"], t2["next_a"])
    np.testing.assert_allclose(t1["a"], t3["a"])
    np.testing.assert_allclose(t1["next_a"], t3["next_a"])
def test_fulled_unsafe_next_of(self):
    """Load unsafe next_of transitions from an already full buffer."""
    capacity = 10
    env_dict = {"a": {}}
    rb1, rb2, rb3 = (ReplayBuffer(capacity, env_dict, next_of="a")
                     for _ in range(3))
    values = list(range(1, 14))  # more transitions than capacity
    rb1.add(a=values[:-1], next_a=values[1:])
    filename = "fulled_unsafe_next_of.npz"
    rb1.save_transitions(filename, safe=False)
    rb2.load_transitions(filename)
    rb3.load_transitions(v(1, filename))
    t1, t2, t3 = (rb.get_all_transitions() for rb in (rb1, rb2, rb3))
    for loaded in (t2, t3):
        np.testing.assert_allclose(t1["a"], loaded["a"])
        np.testing.assert_allclose(t1["next_a"], loaded["next_a"])
def test_stack_compress(self):
    """Save and load stack_compress transitions."""
    capacity = 10
    env_dict = {"a": {"shape": 3}}
    rb1, rb2, rb3 = (ReplayBuffer(capacity, env_dict, stack_compress="a")
                     for _ in range(3))
    rows = [[1, 2, 3],
            [2, 3, 4],
            [3, 4, 5],
            [4, 5, 6]]
    rb1.add(a=rows)
    filename = "stack_compress.npz"
    rb1.save_transitions(filename)
    rb2.load_transitions(filename)
    rb3.load_transitions(v(1, filename))
    t1, t2, t3 = (rb.get_all_transitions() for rb in (rb1, rb2, rb3))
    np.testing.assert_allclose(t1["a"], t2["a"])
    np.testing.assert_allclose(t1["a"], t3["a"])
def test_incompatible_stack_compress(self):
    """Load incompatible stack_compress transitions with safe mode."""
    capacity = 10
    env_dict = {"a": {"shape": 3}}
    rb1 = ReplayBuffer(capacity, env_dict, stack_compress="a")
    rb2 = ReplayBuffer(capacity, env_dict)
    rb3 = ReplayBuffer(capacity, env_dict)
    rows = [[1, 2, 3],
            [2, 3, 4],
            [3, 4, 5],
            [4, 5, 6]]
    rb1.add(a=rows)
    filename = "incompatible_stack_compress.npz"
    rb1.save_transitions(filename)
    for rb in (rb2, rb3):
        rb.load_transitions(filename)
    t1, t2, t3 = (rb.get_all_transitions() for rb in (rb1, rb2, rb3))
    np.testing.assert_allclose(t1["a"], t2["a"])
    np.testing.assert_allclose(t1["a"], t3["a"])
def test_incompatible_unsafe_stack_compress(self):
    """Load incompatible stack_compress transitions with unsafe mode."""
    capacity = 10
    env_dict = {"a": {"shape": 3}}
    rb1 = ReplayBuffer(capacity, env_dict, stack_compress="a")
    rb2 = ReplayBuffer(capacity, env_dict)
    rb3 = ReplayBuffer(capacity, env_dict)
    rows = [[1, 2, 3],
            [2, 3, 4],
            [3, 4, 5],
            [4, 5, 6]]
    rb1.add(a=rows)
    filename = "incompatible_unsafe_stack_compress.npz"
    rb1.save_transitions(filename, safe=False)
    for rb in (rb2, rb3):
        rb.load_transitions(filename)
    t1, t2, t3 = (rb.get_all_transitions() for rb in (rb1, rb2, rb3))
    np.testing.assert_allclose(t1["a"], t2["a"])
    np.testing.assert_allclose(t1["a"], t3["a"])
def test_next_of_stack_compress(self):
    """Save and load combined next_of and stack_compress transitions."""
    capacity = 10
    env_dict = {"a": {"shape": 3}}
    rb1, rb2, rb3 = (ReplayBuffer(capacity, env_dict,
                                  next_of="a", stack_compress="a")
                     for _ in range(3))
    rows = [[1, 2, 3],
            [2, 3, 4],
            [3, 4, 5],
            [4, 5, 6],
            [5, 6, 7],
            [6, 7, 8]]
    rb1.add(a=rows[:-1], next_a=rows[1:])
    filename = "next_of_stack_compress.npz"
    rb1.save_transitions(filename)
    rb2.load_transitions(filename)
    rb3.load_transitions(v(1, filename))
    t1, t2, t3 = (rb.get_all_transitions() for rb in (rb1, rb2, rb3))
    for loaded in (t2, t3):
        np.testing.assert_allclose(t1["a"], loaded["a"])
        np.testing.assert_allclose(t1["next_a"], loaded["next_a"])
def test_unsafe_next_of_stack_compress(self):
    """Save (unsafe) and load combined next_of and stack_compress transitions."""
    capacity = 10
    env_dict = {"a": {"shape": 3}}
    rb1, rb2, rb3 = (ReplayBuffer(capacity, env_dict,
                                  next_of="a", stack_compress="a")
                     for _ in range(3))
    rows = [[1, 2, 3],
            [2, 3, 4],
            [3, 4, 5],
            [4, 5, 6],
            [5, 6, 7],
            [6, 7, 8]]
    rb1.add(a=rows[:-1], next_a=rows[1:])
    filename = "unsafe_next_of_stack_compress.npz"
    rb1.save_transitions(filename, safe=False)
    rb2.load_transitions(filename)
    rb3.load_transitions(v(1, filename))
    t1, t2, t3 = (rb.get_all_transitions() for rb in (rb1, rb2, rb3))
    for loaded in (t2, t3):
        np.testing.assert_allclose(t1["a"], loaded["a"])
        np.testing.assert_allclose(t1["next_a"], loaded["next_a"])
def test_unsafe_fulled_next_of_stack_compress(self):
    """Unsafe save/load of next_of + stack_compress from a full buffer."""
    capacity = 10
    env_dict = {"a": {"shape": 3}}
    rb1, rb2, rb3 = (ReplayBuffer(capacity, env_dict,
                                  next_of="a", stack_compress="a")
                     for _ in range(3))
    # more rows than capacity so the ring buffer wraps
    rows = [[1, 2, 3],
            [2, 3, 4],
            [3, 4, 5],
            [4, 5, 6],
            [5, 6, 7],
            [6, 7, 8],
            [7, 8, 9],
            [8, 9, 10],
            [9, 10, 11],
            [10, 11, 12],
            [11, 12, 13]]
    rb1.add(a=rows[:-1], next_a=rows[1:])
    filename = "unsafe_fulled_next_of_stack_compress.npz"
    rb1.save_transitions(filename, safe=False)
    rb2.load_transitions(filename)
    rb3.load_transitions(v(1, filename))
    t1, t2, t3 = (rb.get_all_transitions() for rb in (rb1, rb2, rb3))
    for loaded in (t2, t3):
        np.testing.assert_allclose(t1["a"], loaded["a"])
        np.testing.assert_allclose(t1["next_a"], loaded["next_a"])
# Run the whole test suite when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
| 30.62818
| 82
| 0.573765
| 2,113
| 15,651
| 4.005206
| 0.055372
| 0.067352
| 0.090393
| 0.122297
| 0.893064
| 0.882311
| 0.848872
| 0.842491
| 0.834338
| 0.800544
| 0
| 0.053169
| 0.280174
| 15,651
| 510
| 83
| 30.688235
| 0.698029
| 0.054374
| 0
| 0.780415
| 0
| 0
| 0.05174
| 0.017362
| 0
| 0
| 0
| 0
| 0.145401
| 1
| 0.050445
| false
| 0
| 0.011869
| 0.002967
| 0.068249
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4367bad6a4096672f162b6fae639b4481d7e2d40
| 2,357
|
py
|
Python
|
test/test_tri.py
|
gbeltramo/delcechfiltr
|
3b7a93fbe18b2fdc3238a3c716daaccb0805bc80
|
[
"MIT"
] | null | null | null |
test/test_tri.py
|
gbeltramo/delcechfiltr
|
3b7a93fbe18b2fdc3238a3c716daaccb0805bc80
|
[
"MIT"
] | null | null | null |
test/test_tri.py
|
gbeltramo/delcechfiltr
|
3b7a93fbe18b2fdc3238a3c716daaccb0805bc80
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.spatial.distance import euclidean
import delcechfiltr.tri
def test_circumradius_2D():
    """Circumradius of random 2D triangles must bound every vertex distance."""
    n_points, dim = 3, 2
    for seed in range(10):
        np.random.seed(seed)
        pts = np.random.rand(n_points, dim)
        radius = delcechfiltr.tri.circumradius(pts)
        center = delcechfiltr.tri.circumcenter(pts)
        err_msg = "mismatch between miniball center and \n" \
                  "miniball radius."
        for vertex in pts:
            d = euclidean(center, vertex)
            assert np.isclose(radius, d) or radius > d, err_msg
def test_circumradius_3D():
    """Circumradius of random 3D triangles must bound every vertex distance."""
    n_points, dim = 3, 3
    for seed in range(10):
        np.random.seed(seed)
        pts = np.random.rand(n_points, dim)
        radius = delcechfiltr.tri.circumradius(pts)
        center = delcechfiltr.tri.circumcenter(pts)
        err_msg = "mismatch between miniball center and \n" \
                  "miniball radius."
        for vertex in pts:
            d = euclidean(center, vertex)
            assert np.isclose(radius, d) or radius > d, err_msg
def test_cech_param_2D():
    """Cech parameter of random 2D triangles must bound every vertex distance."""
    n_points, dim = 3, 2
    for seed in range(10):
        np.random.seed(seed)
        pts = np.random.rand(n_points, dim)
        radius = delcechfiltr.tri.cech_parameter(pts)
        center = delcechfiltr.tri.miniball_center(pts)
        err_msg = "mismatch between miniball center and \n" \
                  "miniball radius."
        for vertex in pts:
            d = euclidean(center, vertex)
            assert np.isclose(radius, d) or radius > d, err_msg
def test_cech_param_3D():
    """Cech parameter of random 3D triangles must bound every vertex distance."""
    n_points, dim = 3, 3
    for seed in range(10):
        np.random.seed(seed)
        pts = np.random.rand(n_points, dim)
        radius = delcechfiltr.tri.cech_parameter(pts)
        center = delcechfiltr.tri.miniball_center(pts)
        err_msg = "mismatch between miniball center and \n" \
                  "miniball radius."
        for vertex in pts:
            d = euclidean(center, vertex)
            assert np.isclose(radius, d) or radius > d, err_msg
| 42.089286
| 66
| 0.602885
| 311
| 2,357
| 4.459807
| 0.160772
| 0.043259
| 0.063446
| 0.069214
| 0.923576
| 0.923576
| 0.923576
| 0.923576
| 0.923576
| 0.923576
| 0
| 0.012195
| 0.3042
| 2,357
| 55
| 67
| 42.854545
| 0.833537
| 0.047094
| 0
| 0.862745
| 0
| 0
| 0.10064
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 1
| 0.078431
| false
| 0
| 0.058824
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43a994eb334120f6e88d5b8ceecff0d3278423cd
| 1,768
|
py
|
Python
|
app/suggestion/forms.py
|
HanaPearlman/maps4all
|
3ec11421adb4f01561f23abc5cab41ccb25fe0e1
|
[
"MIT"
] | 18
|
2016-10-17T22:08:40.000Z
|
2020-07-15T14:19:12.000Z
|
app/suggestion/forms.py
|
HanaPearlman/maps4all
|
3ec11421adb4f01561f23abc5cab41ccb25fe0e1
|
[
"MIT"
] | 10
|
2016-12-12T23:52:20.000Z
|
2017-03-20T01:56:22.000Z
|
app/suggestion/forms.py
|
HanaPearlman/maps4all
|
3ec11421adb4f01561f23abc5cab41ccb25fe0e1
|
[
"MIT"
] | 2
|
2017-03-29T18:14:16.000Z
|
2017-08-27T15:23:27.000Z
|
from flask.ext.wtf import Form
from wtforms.fields import SubmitField, TextField, StringField
from wtforms.validators import Email, InputRequired, Length
class SuggestionBasicForm(Form):
    """Public form for suggesting a new resource.

    Collects the suggester's contact details plus the resource's name,
    address and a free-text suggestion.
    """

    # NOTE(review): the original mixed deprecated `TextField` (an alias of
    # `StringField` in wtforms) with `StringField`; unified on `StringField`,
    # which is already imported at the top of the file. Behavior unchanged.
    contact_name = StringField(
        'Contact Name',
        validators=[InputRequired(), Length(1, 500)]
    )
    contact_email = StringField(
        'Email',
        validators=[InputRequired(), Length(1, 500), Email()]
    )
    contact_phone_number = StringField(
        'Phone Number',
        validators=[InputRequired(), Length(1, 64)]
    )
    name = StringField('Resource Name', validators=[
        InputRequired(),
        Length(1, 500)
    ])
    address = StringField('Resource Address', validators=[
        InputRequired(),
        Length(1, 500)
    ])
    suggestion_text = StringField('Suggestion', validators=[
        InputRequired()
    ])
    submit = SubmitField('Submit')
class SuggestionAdvancedForm(Form):
    """CURRENTLY NOT IN USE

    Intention is to use this as an advanced suggestion form allowing users to
    also fill out descriptor values.
    """

    # NOTE(review): deprecated `TextField` (wtforms alias of `StringField`)
    # replaced by the already-imported `StringField`. Behavior unchanged.
    contact_name = StringField(
        'Contact Name',
        validators=[InputRequired(), Length(1, 500)]
    )
    contact_email = StringField(
        'Email',
        validators=[InputRequired(), Length(1, 500), Email()]
    )
    contact_phone_number = StringField(
        'Phone Number',
        validators=[InputRequired(), Length(1, 64)]
    )
    name = StringField('Resource Name', validators=[
        InputRequired(),
        Length(1, 500)
    ])
    address = StringField('Resource Address', validators=[
        InputRequired(),
        Length(1, 500)
    ])
    suggestion_text = StringField('Suggestion', validators=[
        InputRequired()
    ])
    submit = SubmitField('Submit')
| 29.466667
| 77
| 0.627828
| 164
| 1,768
| 6.707317
| 0.292683
| 0.250909
| 0.263636
| 0.272727
| 0.732727
| 0.732727
| 0.732727
| 0.732727
| 0.732727
| 0.732727
| 0
| 0.028832
| 0.254525
| 1,768
| 60
| 78
| 29.466667
| 0.805766
| 0.071267
| 0
| 0.792453
| 0
| 0
| 0.091189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.056604
| 0
| 0.358491
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
43b2b3de6d28e9a785194b5d1f42e4ed41a98b8e
| 17,156
|
py
|
Python
|
dfirtrack_main/tests/location/test_location_views.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | 273
|
2018-04-18T22:09:15.000Z
|
2021-06-04T09:15:48.000Z
|
dfirtrack_main/tests/location/test_location_views.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 75
|
2018-08-31T11:05:37.000Z
|
2021-06-08T14:15:07.000Z
|
dfirtrack_main/tests/location/test_location_views.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | 61
|
2018-11-12T22:55:48.000Z
|
2021-06-06T15:16:16.000Z
|
import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Location
class LocationViewTestCase(TestCase):
    """Tests for the location list, detail, add, add-popup and edit views.

    Refactor: the login credentials and the `location_1` lookup were
    duplicated in nearly every test; both are factored into private helpers.
    Every public test method and its observable behavior is unchanged.
    """

    # credentials of the shared test user (created in setUpTestData)
    _USERNAME = 'testuser_location'
    _PASSWORD = 'JvXyGOHOvAEvx6xqls7r'

    @classmethod
    def setUpTestData(cls):
        # location that the read-only tests operate on
        Location.objects.create(location_name='location_1')
        # user for authenticated requests
        User.objects.create_user(username=cls._USERNAME, password=cls._PASSWORD)

    # ----- helpers ---------------------------------------------------------

    def _login(self):
        """Log the shared test user in on the test client."""
        self.client.login(username=self._USERNAME, password=self._PASSWORD)

    def _location_1(self):
        """Return the location created in setUpTestData."""
        return Location.objects.get(location_name='location_1')

    # ----- list view -------------------------------------------------------

    def test_location_list_not_logged_in(self):
        """Anonymous list requests redirect to the login page."""
        destination = '/login/?next=' + urllib.parse.quote('/location/', safe='')
        response = self.client.get('/location/', follow=True)
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_location_list_logged_in(self):
        """Authenticated list requests succeed."""
        self._login()
        response = self.client.get('/location/')
        self.assertEqual(response.status_code, 200)

    def test_location_list_template(self):
        """The list view renders the location list template."""
        self._login()
        response = self.client.get('/location/')
        self.assertTemplateUsed(response, 'dfirtrack_main/location/location_list.html')

    def test_location_list_get_user_context(self):
        """The list view puts the requesting user into the context."""
        self._login()
        response = self.client.get('/location/')
        self.assertEqual(str(response.context['user']), 'testuser_location')

    def test_location_list_redirect(self):
        """A missing trailing slash triggers a permanent redirect."""
        self._login()
        destination = urllib.parse.quote('/location/', safe='/')
        response = self.client.get('/location', follow=True)
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    # ----- detail view -----------------------------------------------------

    def test_location_detail_not_logged_in(self):
        """Anonymous detail requests redirect to the login page."""
        location_1 = self._location_1()
        destination = '/login/?next=' + urllib.parse.quote(
            '/location/' + str(location_1.location_id) + '/', safe=''
        )
        response = self.client.get(
            '/location/' + str(location_1.location_id) + '/', follow=True
        )
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_location_detail_logged_in(self):
        """Authenticated detail requests succeed."""
        location_1 = self._location_1()
        self._login()
        response = self.client.get('/location/' + str(location_1.location_id) + '/')
        self.assertEqual(response.status_code, 200)

    def test_location_detail_template(self):
        """The detail view renders the location detail template."""
        location_1 = self._location_1()
        self._login()
        response = self.client.get('/location/' + str(location_1.location_id) + '/')
        self.assertTemplateUsed(
            response, 'dfirtrack_main/location/location_detail.html'
        )

    def test_location_detail_get_user_context(self):
        """The detail view puts the requesting user into the context."""
        location_1 = self._location_1()
        self._login()
        response = self.client.get('/location/' + str(location_1.location_id) + '/')
        self.assertEqual(str(response.context['user']), 'testuser_location')

    def test_location_detail_redirect(self):
        """A missing trailing slash triggers a permanent redirect."""
        location_1 = self._location_1()
        self._login()
        destination = urllib.parse.quote(
            '/location/' + str(location_1.location_id) + '/', safe='/'
        )
        response = self.client.get(
            '/location/' + str(location_1.location_id), follow=True
        )
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    # ----- add view --------------------------------------------------------

    def test_location_add_not_logged_in(self):
        """Anonymous add requests redirect to the login page."""
        destination = '/login/?next=' + urllib.parse.quote('/location/add/', safe='')
        response = self.client.get('/location/add/', follow=True)
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_location_add_logged_in(self):
        """Authenticated add requests succeed."""
        self._login()
        response = self.client.get('/location/add/')
        self.assertEqual(response.status_code, 200)

    def test_location_add_template(self):
        """The add view renders the generic form template."""
        self._login()
        response = self.client.get('/location/add/')
        self.assertTemplateUsed(response, 'dfirtrack_main/generic_form.html')

    def test_location_add_get_user_context(self):
        """The add view puts the requesting user into the context."""
        self._login()
        response = self.client.get('/location/add/')
        self.assertEqual(str(response.context['user']), 'testuser_location')

    def test_location_add_redirect(self):
        """A missing trailing slash triggers a permanent redirect."""
        self._login()
        destination = urllib.parse.quote('/location/add/', safe='/')
        response = self.client.get('/location/add', follow=True)
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    def test_location_add_post_redirect(self):
        """A valid POST creates the location and redirects to its detail view."""
        self._login()
        data_dict = {
            'location_name': 'location_add_post_test',
        }
        response = self.client.post('/location/add/', data_dict)
        location_id = Location.objects.get(
            location_name='location_add_post_test'
        ).location_id
        destination = urllib.parse.quote(
            '/location/' + str(location_id) + '/', safe='/'
        )
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_location_add_post_invalid_reload(self):
        """An invalid POST re-renders the form with status 200."""
        self._login()
        response = self.client.post('/location/add/', {})
        self.assertEqual(response.status_code, 200)

    def test_location_add_post_invalid_template(self):
        """An invalid POST re-renders the generic form template."""
        self._login()
        response = self.client.post('/location/add/', {})
        self.assertTemplateUsed(response, 'dfirtrack_main/generic_form.html')

    # ----- add-popup view --------------------------------------------------

    def test_location_add_popup_not_logged_in(self):
        """Anonymous popup-add requests redirect to the login page."""
        destination = '/login/?next=' + urllib.parse.quote(
            '/location/add_popup/', safe=''
        )
        response = self.client.get('/location/add_popup/', follow=True)
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_location_add_popup_logged_in(self):
        """Authenticated popup-add requests succeed."""
        self._login()
        response = self.client.get('/location/add_popup/')
        self.assertEqual(response.status_code, 200)

    def test_location_add_popup_template(self):
        """The popup-add view renders the generic popup form template."""
        self._login()
        response = self.client.get('/location/add_popup/')
        self.assertTemplateUsed(response, 'dfirtrack_main/generic_form_popup.html')

    def test_location_add_popup_get_user_context(self):
        """The popup-add view puts the requesting user into the context."""
        self._login()
        response = self.client.get('/location/add_popup/')
        self.assertEqual(str(response.context['user']), 'testuser_location')

    def test_location_add_popup_redirect(self):
        """A missing trailing slash triggers a permanent redirect."""
        self._login()
        destination = urllib.parse.quote('/location/add_popup/', safe='/')
        response = self.client.get('/location/add_popup', follow=True)
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    def test_location_add_popup_post_redirect(self):
        """A valid popup POST answers with status 200 (no redirect)."""
        self._login()
        data_dict = {
            'location_name': 'location_add_popup_post_test',
        }
        response = self.client.post('/location/add_popup/', data_dict)
        self.assertEqual(response.status_code, 200)

    def test_location_add_popup_post_invalid_reload(self):
        """An invalid popup POST re-renders the form with status 200."""
        self._login()
        response = self.client.post('/location/add_popup/', {})
        self.assertEqual(response.status_code, 200)

    def test_location_add_popup_post_invalid_template(self):
        """An invalid popup POST re-renders the generic popup form template."""
        self._login()
        response = self.client.post('/location/add_popup/', {})
        self.assertTemplateUsed(response, 'dfirtrack_main/generic_form_popup.html')

    # ----- edit view -------------------------------------------------------

    def test_location_edit_not_logged_in(self):
        """Anonymous edit requests redirect to the login page."""
        location_1 = self._location_1()
        destination = '/login/?next=' + urllib.parse.quote(
            '/location/' + str(location_1.location_id) + '/edit/', safe=''
        )
        response = self.client.get(
            '/location/' + str(location_1.location_id) + '/edit/', follow=True
        )
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_location_edit_logged_in(self):
        """Authenticated edit requests succeed."""
        location_1 = self._location_1()
        self._login()
        response = self.client.get(
            '/location/' + str(location_1.location_id) + '/edit/'
        )
        self.assertEqual(response.status_code, 200)

    def test_location_edit_template(self):
        """The edit view renders the generic form template."""
        location_1 = self._location_1()
        self._login()
        response = self.client.get(
            '/location/' + str(location_1.location_id) + '/edit/'
        )
        self.assertTemplateUsed(response, 'dfirtrack_main/generic_form.html')

    def test_location_edit_get_user_context(self):
        """The edit view puts the requesting user into the context."""
        location_1 = self._location_1()
        self._login()
        response = self.client.get(
            '/location/' + str(location_1.location_id) + '/edit/'
        )
        self.assertEqual(str(response.context['user']), 'testuser_location')

    def test_location_edit_redirect(self):
        """A missing trailing slash triggers a permanent redirect."""
        location_1 = self._location_1()
        self._login()
        destination = urllib.parse.quote(
            '/location/' + str(location_1.location_id) + '/edit/', safe='/'
        )
        response = self.client.get(
            '/location/' + str(location_1.location_id) + '/edit', follow=True
        )
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    def test_location_edit_post_redirect(self):
        """A valid edit POST renames the location and redirects to its detail view."""
        self._login()
        location_1 = Location.objects.create(location_name='location_edit_post_test_1')
        data_dict = {
            'location_name': 'location_edit_post_test_2',
        }
        response = self.client.post(
            '/location/' + str(location_1.location_id) + '/edit/', data_dict
        )
        location_2 = Location.objects.get(location_name='location_edit_post_test_2')
        destination = urllib.parse.quote(
            '/location/' + str(location_2.location_id) + '/', safe='/'
        )
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200
        )

    def test_location_edit_post_invalid_reload(self):
        """An invalid edit POST re-renders the form with status 200."""
        self._login()
        location_id = self._location_1().location_id
        response = self.client.post(
            '/location/' + str(location_id) + '/edit/', {}
        )
        self.assertEqual(response.status_code, 200)

    def test_location_edit_post_invalid_template(self):
        """An invalid edit POST re-renders the generic form template."""
        self._login()
        location_id = self._location_1().location_id
        response = self.client.post(
            '/location/' + str(location_id) + '/edit/', {}
        )
        self.assertTemplateUsed(response, 'dfirtrack_main/generic_form.html')
| 34.940937
| 88
| 0.625903
| 1,778
| 17,156
| 5.822835
| 0.042182
| 0.060852
| 0.049261
| 0.075534
| 0.963585
| 0.944171
| 0.92833
| 0.921086
| 0.905245
| 0.886506
| 0
| 0.016011
| 0.260958
| 17,156
| 490
| 89
| 35.012245
| 0.800536
| 0.125437
| 0
| 0.544715
| 0
| 0
| 0.174017
| 0.02974
| 0
| 0
| 0
| 0
| 0.138211
| 1
| 0.142276
| false
| 0.121951
| 0.01626
| 0
| 0.162602
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
43bae7706c2de5524789a1094d9e2eaf33b1f796
| 136
|
py
|
Python
|
slot/d/__init__.py
|
LorentzB/dl
|
c2af8498ba868abcd2ddb08eb9e4b4bb79594ba2
|
[
"Apache-2.0"
] | 45
|
2018-12-30T14:19:37.000Z
|
2021-01-28T08:16:41.000Z
|
slot/d/__init__.py
|
LorentzB/dl
|
c2af8498ba868abcd2ddb08eb9e4b4bb79594ba2
|
[
"Apache-2.0"
] | 23
|
2019-01-07T22:32:00.000Z
|
2019-10-04T10:23:02.000Z
|
slot/d/__init__.py
|
LorentzB/dl
|
c2af8498ba868abcd2ddb08eb9e4b4bb79594ba2
|
[
"Apache-2.0"
] | 36
|
2019-01-11T21:38:02.000Z
|
2021-01-28T08:16:53.000Z
|
from slot.d.flame import *
from slot.d.water import *
from slot.d.wind import *
from slot.d.light import *
from slot.d.shadow import *
| 19.428571
| 27
| 0.735294
| 25
| 136
| 4
| 0.36
| 0.4
| 0.45
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154412
| 136
| 6
| 28
| 22.666667
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
78fab79da27f5885b9ce431813fe41d18c011d65
| 24,964
|
py
|
Python
|
Differential_Drive.py
|
cfeyer/cfeyer-differential-drive-model
|
ee8c51f6bbbd2faaf15e4662673bcd3f938c1207
|
[
"MIT"
] | null | null | null |
Differential_Drive.py
|
cfeyer/cfeyer-differential-drive-model
|
ee8c51f6bbbd2faaf15e4662673bcd3f938c1207
|
[
"MIT"
] | null | null | null |
Differential_Drive.py
|
cfeyer/cfeyer-differential-drive-model
|
ee8c51f6bbbd2faaf15e4662673bcd3f938c1207
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# MIT License
#
# Copyright (c) 2017 Chris Feyerchak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import unittest
from math import pi, sin, cos, sqrt
class Position_Heading_Coordinates:
    """A planar pose: position (x, y) plus heading in radians (0 = east)."""

    def __init__(self, x0=0, y0=0, heading0_rads=0):
        self.x = float(x0)
        self.y = float(y0)
        self.hdg_rads = float(heading0_rads)  # 0 = east

    def __eq__(self, other):
        """Approximate equality: all three components agree within 1e-14."""
        tol = 1.0e-14
        deltas = (other.x - self.x,
                  other.y - self.y,
                  other.hdg_rads - self.hdg_rads)
        return all(abs(delta) <= tol for delta in deltas)

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return "x=%s, y=%s, h=%s" % (self.x, self.y, self.hdg_rads)
class Differential_Drive:
    """Kinematic model of a two-wheeled differential-drive robot."""

    def __init__(self, track_width):
        # distance between the left and right wheels
        self.track_width = float(track_width)

    def drive(self, initial_coords, left_travel, right_travel):
        """Return the pose reached after each wheel rolls the given distance.

        Handled cases: straight/stopped (equal travel), rotation in place
        (opposite travel), and a pivot about either stationary wheel; any
        other combination raises Exception. Bug fix: the original aliased
        and mutated the caller's *initial_coords*; a fresh
        Position_Heading_Coordinates is now returned instead.
        """
        final_coords = Position_Heading_Coordinates(
            initial_coords.x, initial_coords.y, initial_coords.hdg_rads
        )
        if left_travel == right_travel:  # stopped or driving straight
            final_coords.x += cos(initial_coords.hdg_rads) * float(left_travel)
            final_coords.y += sin(initial_coords.hdg_rads) * float(left_travel)
        elif left_travel == -right_travel:  # rotate in place
            final_coords.hdg_rads = (
                right_travel / self.track_width + initial_coords.hdg_rads
            )
        elif left_travel == 0:  # rotate about the stationary left wheel
            self._pivot(final_coords, initial_coords,
                        self.left_wheel_coords(initial_coords),
                        right_travel / self.track_width)
        elif right_travel == 0:  # rotate about the stationary right wheel
            self._pivot(final_coords, initial_coords,
                        self.right_wheel_coords(initial_coords),
                        -left_travel / self.track_width)
        else:
            raise Exception("Not implemented for this (left_travel, right_travel)")
        return final_coords

    @staticmethod
    def _pivot(final_coords, initial_coords, pivot, turn_angle_rads):
        """Rotate the robot origin about *pivot* by *turn_angle_rads*.

        Writes the result into *final_coords*; shared by the left- and
        right-wheel pivot branches of drive() (duplicated in the original).
        """
        x1 = initial_coords.x - pivot.x
        y1 = initial_coords.y - pivot.y
        final_coords.x = cos(turn_angle_rads) * x1 - sin(turn_angle_rads) * y1 + pivot.x
        final_coords.y = sin(turn_angle_rads) * x1 + cos(turn_angle_rads) * y1 + pivot.y
        final_coords.hdg_rads = initial_coords.hdg_rads + turn_angle_rads

    def left_wheel_coords(self, p):
        """Pose of the left wheel for robot pose *p* (heading unchanged)."""
        x = -(self.track_width / 2.0) * sin(p.hdg_rads) + p.x
        y = (self.track_width / 2.0) * cos(p.hdg_rads) + p.y
        return Position_Heading_Coordinates(x, y, p.hdg_rads)

    def right_wheel_coords(self, p):
        """Pose of the right wheel for robot pose *p* (heading unchanged)."""
        x = (self.track_width / 2.0) * sin(p.hdg_rads) + p.x
        y = -(self.track_width / 2.0) * cos(p.hdg_rads) + p.y
        return Position_Heading_Coordinates(x, y, p.hdg_rads)
# Short aliases used by the test classes below.
PHC = Position_Heading_Coordinates
DD = Differential_Drive
class Test_PHC(unittest.TestCase):
    """Unit tests for Position_Heading_Coordinates construction and equality."""

    def test_default_ctor(self):
        pose = PHC()
        self.assertEqual(pose.x, 0)
        self.assertEqual(pose.y, 0)
        self.assertEqual(pose.hdg_rads, 0)

    def test_custom_ctor(self):
        pose = PHC(1, 2, 3)
        self.assertEqual(pose.x, 1)
        self.assertEqual(pose.y, 2)
        self.assertEqual(pose.hdg_rads, 3)

    def test_default_objects_equal(self):
        self.assertEqual(PHC(), PHC())

    def test_custom_objects_equal(self):
        self.assertEqual(PHC(1, 2, 3), PHC(1, 2, 3))

    def test_custom_objects_x_not_equal(self):
        left, right = PHC(0, 0, 0), PHC(1, 0, 0)
        self.assertNotEqual(left, right)
        self.assertNotEqual(right, left)

    def test_custom_objects_y_not_equal(self):
        left, right = PHC(0, 0, 0), PHC(0, 1, 0)
        self.assertNotEqual(left, right)
        self.assertNotEqual(right, left)
        # NOTE(review): these heading comparisons look misplaced in a
        # y-inequality test; kept to preserve coverage exactly.
        self.assertNotEqual(PHC(0, 0, 0), PHC(0, 0, pi / 2))
        self.assertNotEqual(PHC(0, 0, 0), PHC(0, 0, pi))
        self.assertNotEqual(PHC(0, 0, pi / 2), PHC(0, 0, pi))

    def test_custom_objects_heading_not_equal(self):
        left, right = PHC(0, 0, 0), PHC(0, 0, 1)
        self.assertNotEqual(left, right)
        self.assertNotEqual(right, left)
class Position_Heading_Coordinates_Test_Case(unittest.TestCase):
    """TestCase base class adding an approximate-equality assertion for poses."""

    def assertEqualPHC(self, actual, expected):
        """Fail unless actual and expected agree in x, y and hdg_rads
        to within an absolute tolerance of 1e-14 per component."""
        tol = 1.0e-14
        # Collect every out-of-tolerance component, then fail once with
        # all of them listed (same message shape as the original checks).
        bad_fields = [
            field
            for field in ("x", "y", "hdg_rads")
            if abs(getattr(actual, field) - getattr(expected, field)) > tol
        ]
        if bad_fields:
            lines = ["expected: " + str(expected), "actual: " + str(actual)]
            lines.extend("assertion fail: " + field for field in bad_fields)
            self.fail("\n".join(lines))
class Test_DD(Position_Heading_Coordinates_Test_Case):
    """Behavioral tests for Differential_Drive.

    Straight-line and rotate-in-place cases use exact equality; the
    pivot-about-a-wheel cases go through the tolerant assertEqualPHC
    helper because they involve trigonometric round-off.
    """
    def test_stopped(self):
        # Zero travel on both wheels leaves the pose unchanged.
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(), 0, 0), PHC())
        self.assertEqual(dd.drive(PHC(1,2,3), 0, 0), PHC(1,2,3))
        pass
    def test_drive_forward_straight_east(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,0), 1, 1), PHC(1,0,0))
        self.assertEqual(dd.drive(PHC(1,2,0), 2, 2), PHC(3,2,0))
        self.assertEqual(dd.drive(PHC(-3,4,0), 4, 4), PHC(1,4,0))
        self.assertEqual(dd.drive(PHC(-5,-6,0), 8, 8), PHC(3,-6,0))
        self.assertEqual(dd.drive(PHC(7,-8,0), 16, 16), PHC(23,-8,0))
    def test_drive_backward_straight_east(self):
        # Facing west (pi) and rolling backwards moves the robot east.
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,pi), -1, -1), PHC(1,0,pi))
        self.assertEqual(dd.drive(PHC(1,2,pi), -2, -2), PHC(3,2,pi))
        self.assertEqual(dd.drive(PHC(-3,4,pi), -4, -4), PHC(1,4,pi))
        self.assertEqual(dd.drive(PHC(-5,-6,pi), -8, -8), PHC(3,-6,pi))
        self.assertEqual(dd.drive(PHC(7,-8,pi), -16, -16), PHC(23,-8,pi))
    def test_drive_forward_straight_west(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,pi), 1, 1), PHC(-1,0,pi))
        self.assertEqual(dd.drive(PHC(1,2,pi), 2, 2), PHC(-1,2,pi))
        self.assertEqual(dd.drive(PHC(-3,4,pi), 4, 4), PHC(-7,4,pi))
        self.assertEqual(dd.drive(PHC(-5,-6,pi), 8, 8), PHC(-13,-6,pi))
        self.assertEqual(dd.drive(PHC(7,-8,pi), 16, 16), PHC(-9,-8,pi))
    def test_drive_backward_straight_west(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,0), -1, -1), PHC(-1,0,0))
        self.assertEqual(dd.drive(PHC(1,2,0), -2, -2), PHC(-1,2,0))
        self.assertEqual(dd.drive(PHC(-3,4,0), -4, -4), PHC(-7,4,0))
        self.assertEqual(dd.drive(PHC(-5,-6,0), -8, -8), PHC(-13,-6,0))
        self.assertEqual(dd.drive(PHC(7,-8,0), -16, -16), PHC(-9,-8,0))
    def test_drive_forward_straight_north(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,pi/2), 1, 1), PHC(0,1,pi/2))
        self.assertEqual(dd.drive(PHC(1,2,pi/2), 2, 2), PHC(1,4,pi/2))
        self.assertEqual(dd.drive(PHC(-3,4,pi/2), 4, 4), PHC(-3,8,pi/2))
        self.assertEqual(dd.drive(PHC(-5,-6,pi/2), 8, 8), PHC(-5,2,pi/2))
        self.assertEqual(dd.drive(PHC(7,-8,pi/2), 16, 16), PHC(7,8,pi/2))
    def test_drive_backward_straight_north(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,-pi/2), -1, -1), PHC(0,1,-pi/2))
        self.assertEqual(dd.drive(PHC(1,2,-pi/2), -2, -2), PHC(1,4,-pi/2))
        self.assertEqual(dd.drive(PHC(-3,4,-pi/2), -4, -4), PHC(-3,8,-pi/2))
        self.assertEqual(dd.drive(PHC(-5,-6,-pi/2), -8, -8), PHC(-5,2,-pi/2))
        self.assertEqual(dd.drive(PHC(7,-8,-pi/2), -16, -16), PHC(7,8,-pi/2))
    def test_drive_forward_straight_south(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,-pi/2), 1, 1), PHC(0,-1,-pi/2))
        self.assertEqual(dd.drive(PHC(1,2,-pi/2), 2, 2), PHC(1,0,-pi/2))
        self.assertEqual(dd.drive(PHC(-3,4,-pi/2), 4, 4), PHC(-3,0,-pi/2))
        self.assertEqual(dd.drive(PHC(-5,-6,-pi/2), 8, 8), PHC(-5,-14,-pi/2))
        self.assertEqual(dd.drive(PHC(7,-8,-pi/2), 16, 16), PHC(7,-24,-pi/2))
    def test_drive_backward_straight_south(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,pi/2), -1, -1), PHC(0,-1,pi/2))
        self.assertEqual(dd.drive(PHC(1,2,pi/2), -2, -2), PHC(1,0,pi/2))
        self.assertEqual(dd.drive(PHC(-3,4,pi/2), -4, -4), PHC(-3,0,pi/2))
        self.assertEqual(dd.drive(PHC(-5,-6,pi/2), -8, -8), PHC(-5,-14,pi/2))
        self.assertEqual(dd.drive(PHC(7,-8,pi/2), -16, -16), PHC(7,-24,pi/2))
    def test_drive_forward_straight_northeast(self):
        # Diagonal headings: expected offsets use exact cos/sin values.
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,pi/4), 1, 1), PHC(sqrt(2)/2,sqrt(2)/2,pi/4))
        self.assertEqual(dd.drive(PHC(0,0,pi/6), 1, 1), PHC(sqrt(3)/2,0.5,pi/6))
        self.assertEqual(dd.drive(PHC(0,0,pi/3), 1, 1), PHC(0.5,sqrt(3)/2,pi/3))
        self.assertEqual(dd.drive(PHC(1,2,pi/4), 2, 2), PHC(1+2*sqrt(2)/2,2+2*sqrt(2)/2,pi/4))
        self.assertEqual(dd.drive(PHC(-3,4,pi/4), 4, 4), PHC(-3+4*sqrt(2)/2,4+4*sqrt(2)/2,pi/4))
        self.assertEqual(dd.drive(PHC(-5,-6,pi/4), 8, 8), PHC(-5+8*sqrt(2)/2,-6+8*sqrt(2)/2,pi/4))
        self.assertEqual(dd.drive(PHC(7,-8,pi/4), 16, 16), PHC(7+16*sqrt(2)/2,-8+16*sqrt(2)/2,pi/4))
    def test_drive_backward_straight_northeast(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,5*pi/4), -1, -1), PHC(sqrt(2)/2,sqrt(2)/2,5*pi/4))
        self.assertEqual(dd.drive(PHC(0,0,7*pi/6), -1, -1), PHC(sqrt(3)/2,0.5,7*pi/6))
        self.assertEqual(dd.drive(PHC(0,0,4*pi/3), -1, -1), PHC(0.5,sqrt(3)/2,4*pi/3))
        self.assertEqual(dd.drive(PHC(1,2,5*pi/4), -2, -2), PHC(1+2*sqrt(2)/2,2+2*sqrt(2)/2,5*pi/4))
        self.assertEqual(dd.drive(PHC(-3,4,5*pi/4), -4, -4), PHC(-3+4*sqrt(2)/2,4+4*sqrt(2)/2,5*pi/4))
        self.assertEqual(dd.drive(PHC(-5,-6,5*pi/4), -8, -8), PHC(-5+8*sqrt(2)/2,-6+8*sqrt(2)/2,5*pi/4))
        self.assertEqual(dd.drive(PHC(7,-8,5*pi/4), -16, -16), PHC(7+16*sqrt(2)/2,-8+16*sqrt(2)/2,5*pi/4))
    def test_drive_forward_straight_northwest(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,3*pi/4), 1, 1), PHC(-sqrt(2)/2,sqrt(2)/2,3*pi/4))
        self.assertEqual(dd.drive(PHC(0,0,5*pi/6), 1, 1), PHC(-sqrt(3)/2,0.5,5*pi/6))
        self.assertEqual(dd.drive(PHC(0,0,2*pi/3), 1, 1), PHC(-0.5,sqrt(3)/2,2*pi/3))
    def test_drive_backward_straight_northwest(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,7*pi/4), -1, -1), PHC(-sqrt(2)/2,sqrt(2)/2,7*pi/4))
        self.assertEqual(dd.drive(PHC(0,0,11*pi/6), -1, -1), PHC(-sqrt(3)/2,0.5,11*pi/6))
        self.assertEqual(dd.drive(PHC(0,0,5*pi/3), -1, -1), PHC(-0.5,sqrt(3)/2,5*pi/3))
    def test_drive_forward_straight_southwest(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,5*pi/4), 1, 1), PHC(-sqrt(2)/2,-sqrt(2)/2,5*pi/4))
    def test_drive_backward_straight_southwest(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,pi/4), -1, -1), PHC(-sqrt(2)/2,-sqrt(2)/2,pi/4))
    def test_drive_forward_straight_southeast(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,-pi/4), 1, 1), PHC(sqrt(2)/2,-sqrt(2)/2,-pi/4))
    def test_drive_backward_straight_southeast(self):
        dd = DD(1)
        self.assertEqual(dd.drive(PHC(0,0,3*pi/4), -1, -1), PHC(sqrt(2)/2,-sqrt(2)/2,3*pi/4))
    def test_rotate_in_place_counterclockwise_from_zero_heading(self):
        # Opposite wheel travel spins in place: heading change is
        # right_travel / track_width, position unchanged.
        dd1 = DD(1)
        self.assertEqual(dd1.drive(PHC(0,0,0), -pi/2, pi/2), PHC(0,0,pi/2))
        self.assertEqual(dd1.drive(PHC(1,2,0), -pi, pi), PHC(1,2,pi))
        self.assertEqual(dd1.drive(PHC(3,4,0), -3*pi/2, 3*pi/2), PHC(3,4,3*pi/2))
        dd2 = DD(2)
        self.assertEqual(dd2.drive(PHC(0,0,0), -2*pi/2, 2*pi/2 ), PHC(0,0,pi/2))
        self.assertEqual(dd2.drive(PHC(1,2,0), -2*pi, 2*pi ), PHC(1,2,pi))
        self.assertEqual(dd2.drive(PHC(3,4,0), -2*3*pi/2, 2*3*pi/2), PHC(3,4,3*pi/2))
    def test_rotate_in_place_clockwise_from_zero_heading(self):
        dd1 = DD(1)
        self.assertEqual(dd1.drive(PHC(0,0,0), pi/2, -pi/2), PHC(0,0,-pi/2))
        self.assertEqual(dd1.drive(PHC(1,2,0), pi, -pi), PHC(1,2,-pi))
        self.assertEqual(dd1.drive(PHC(3,4,0), 3*pi/2, -3*pi/2), PHC(3,4,-3*pi/2))
        dd2 = DD(2)
        self.assertEqual(dd2.drive(PHC(0,0,0), 2*pi/2, -2*pi/2 ), PHC(0,0,-pi/2))
        self.assertEqual(dd2.drive(PHC(1,2,0), 2*pi, -2*pi ), PHC(1,2,-pi))
        self.assertEqual(dd2.drive(PHC(3,4,0), 2*3*pi/2, -2*3*pi/2), PHC(3,4,-3*pi/2))
    def test_rotate_in_place_counterclockwise_from_nonzero_heading(self):
        dd1 = DD(1)
        self.assertEqual(dd1.drive(PHC(0,0,1), -pi/2, pi/2), PHC(0,0,pi/2+1))
        self.assertEqual(dd1.drive(PHC(1,2,2), -pi, pi), PHC(1,2,pi+2))
        self.assertEqual(dd1.drive(PHC(3,4,3), -3*pi/2, 3*pi/2), PHC(3,4,3*pi/2+3))
        dd2 = DD(2)
        self.assertEqual(dd2.drive(PHC(0,0,-1), -2*pi/2, 2*pi/2 ), PHC(0,0,pi/2-1))
        self.assertEqual(dd2.drive(PHC(1,2,-2), -2*pi, 2*pi ), PHC(1,2,pi-2))
        self.assertEqual(dd2.drive(PHC(3,4,-3), -2*3*pi/2, 2*3*pi/2), PHC(3,4,3*pi/2-3))
    def test_rotate_in_place_clockwise_from_zero_nonheading(self):
        # NOTE(review): method name looks like a typo for
        # "..._from_nonzero_heading"; kept as-is (rename is not a doc change).
        dd1 = DD(1)
        self.assertEqual(dd1.drive(PHC(0,0,1), pi/2, -pi/2), PHC(0,0,-pi/2+1))
        self.assertEqual(dd1.drive(PHC(1,2,2), pi, -pi), PHC(1,2,-pi+2))
        self.assertEqual(dd1.drive(PHC(3,4,3), 3*pi/2, -3*pi/2), PHC(3,4,-3*pi/2+3))
        dd2 = DD(2)
        self.assertEqual(dd2.drive(PHC(0,0,-1), 2*pi/2, -2*pi/2 ), PHC(0,0,-pi/2-1))
        self.assertEqual(dd2.drive(PHC(1,2,-2), 2*pi, -2*pi ), PHC(1,2,-pi-2))
        self.assertEqual(dd2.drive(PHC(3,4,-3), 2*3*pi/2, -2*3*pi/2), PHC(3,4,-3*pi/2-3))
    def test_left_wheel_coords(self):
        # Left wheel sits half a track width to the robot's left.
        dd1 = DD(1)
        self.assertEqual(dd1.left_wheel_coords(PHC(0,0,0)), PHC(0,0.5,0))
        self.assertEqual(dd1.left_wheel_coords(PHC(0,0,pi/2)), PHC(-0.5,0,pi/2))
        self.assertEqual(dd1.left_wheel_coords(PHC(0,0,pi)), PHC(0,-0.5,pi))
        self.assertEqual(dd1.left_wheel_coords(PHC(0,0,3*pi/2)), PHC(0.5,0,3*pi/2))
        self.assertEqual(dd1.left_wheel_coords(PHC(1,2,0)), PHC(0+1,0.5+2,0))
        self.assertEqual(dd1.left_wheel_coords(PHC(-3,4,pi/2)), PHC(-0.5-3,0+4,pi/2))
        self.assertEqual(dd1.left_wheel_coords(PHC(-5,-6,pi)), PHC(0-5,-0.5-6,pi))
        self.assertEqual(dd1.left_wheel_coords(PHC(7,-8,3*pi/2)), PHC(0.5+7,0-8,3*pi/2))
        self.assertEqualPHC(dd1.left_wheel_coords(PHC(1,-1.5,0)), PHC(1,-1.0,0))
        dd2 = DD(2)
        self.assertEqual(dd2.left_wheel_coords(PHC(0,0,0)), PHC(0,1,0))
        self.assertEqual(dd2.left_wheel_coords(PHC(0,0,pi/2)), PHC(-1,0,pi/2))
        self.assertEqual(dd2.left_wheel_coords(PHC(0,0,pi)), PHC(0,-1,pi))
        self.assertEqual(dd2.left_wheel_coords(PHC(0,0,3*pi/2)), PHC(1,0,3*pi/2))
        self.assertEqual(dd2.left_wheel_coords(PHC(1,2,0)), PHC(0+1,1+2,0))
        self.assertEqual(dd2.left_wheel_coords(PHC(-3,4,pi/2)), PHC(-1-3,0+4,pi/2))
        self.assertEqual(dd2.left_wheel_coords(PHC(-5,-6,pi)), PHC(0-5,-1-6,pi))
        self.assertEqual(dd2.left_wheel_coords(PHC(7,-8,3*pi/2)), PHC(1+7,0-8,3*pi/2))
    def test_right_wheel_coords(self):
        # Right wheel sits half a track width to the robot's right.
        dd1 = DD(1)
        self.assertEqual(dd1.right_wheel_coords(PHC(0,0,0)), PHC(0,-0.5,0))
        self.assertEqual(dd1.right_wheel_coords(PHC(0,0,pi/2)), PHC(0.5,0,pi/2))
        self.assertEqual(dd1.right_wheel_coords(PHC(0,0,pi)), PHC(0,0.5,pi))
        self.assertEqual(dd1.right_wheel_coords(PHC(0,0,3*pi/2)), PHC(-0.5,0,3*pi/2))
        self.assertEqual(dd1.right_wheel_coords(PHC(1,2,0)), PHC(0+1,-0.5+2,0))
        self.assertEqual(dd1.right_wheel_coords(PHC(-3,4,pi/2)), PHC(0.5-3,0+4,pi/2))
        self.assertEqual(dd1.right_wheel_coords(PHC(-5,-6,pi)), PHC(0-5,0.5-6,pi))
        self.assertEqual(dd1.right_wheel_coords(PHC(7,-8,3*pi/2)), PHC(-0.5+7,0-8,3*pi/2))
        dd2 = DD(2)
        self.assertEqual(dd2.right_wheel_coords(PHC(0,0,0)), PHC(0,-1,0))
        self.assertEqual(dd2.right_wheel_coords(PHC(0,0,pi/2)), PHC(1,0,pi/2))
        self.assertEqual(dd2.right_wheel_coords(PHC(0,0,pi)), PHC(0,1,pi))
        self.assertEqual(dd2.right_wheel_coords(PHC(0,0,3*pi/2)), PHC(-1,0,3*pi/2))
        self.assertEqual(dd2.right_wheel_coords(PHC(1,2,0)), PHC(0+1,-1+2,0))
        self.assertEqual(dd2.right_wheel_coords(PHC(-3,4,pi/2)), PHC(1-3,0+4,pi/2))
        self.assertEqual(dd2.right_wheel_coords(PHC(-5,-6,pi)), PHC(0-5,1-6,pi))
        self.assertEqual(dd2.right_wheel_coords(PHC(7,-8,3*pi/2)), PHC(-1+7,0-8,3*pi/2))
    def test_rotate_about_left_wheel_counterclockwise(self):
        # left_travel == 0: robot pivots about its stationary left wheel.
        dd1 = DD(1)
        self.assertEqualPHC(dd1.drive(PHC(0,-0.5,0),0,pi/2), PHC(0.5,0,pi/2))
        self.assertEqualPHC(dd1.drive(PHC(0.5,0,pi/2),0,pi/2), PHC(0,0.5,pi))
        self.assertEqualPHC(dd1.drive(PHC(0,0.5,pi),0,pi/2), PHC(-0.5,0,3*pi/2))
        self.assertEqualPHC(dd1.drive(PHC(-0.5,0,3*pi/2),0,pi/2), PHC(0,-0.5,2*pi))
        self.assertEqualPHC(dd1.drive(PHC(0.5,0,pi/2),0,pi/4), PHC(sqrt(2)/4,sqrt(2)/4,3*pi/4))
        self.assertEqualPHC(dd1.drive(PHC(0+1,-0.5+2,0),0,pi/2), PHC(0.5+1,0+2,pi/2))
        self.assertEqualPHC(dd1.drive(PHC(0.5+3,0+4,pi/2),0,pi/2), PHC(0+3,0.5+4,pi))
        self.assertEqualPHC(dd1.drive(PHC(0+5,0.5+6,pi),0,pi/2), PHC(-0.5+5,0+6,3*pi/2))
        self.assertEqualPHC(dd1.drive(PHC(-0.5+7,0+8,3*pi/2),0,pi/2), PHC(0+7,-0.5+8,2*pi))
        self.assertEqualPHC(dd1.drive(PHC(0.5+9,0+10,pi/2),0,pi/4), PHC(sqrt(2)/4+9,sqrt(2)/4+10,3*pi/4))
        dd2 = DD(2)
        self.assertEqualPHC(dd2.drive(PHC(0,-1,0),0,pi), PHC(1,0,pi/2))
        self.assertEqualPHC(dd2.drive(PHC(1,0,pi/2),0,pi), PHC(0,1,pi))
        self.assertEqualPHC(dd2.drive(PHC(0,1,pi),0,pi), PHC(-1,0,3*pi/2))
        self.assertEqualPHC(dd2.drive(PHC(-1,0,3*pi/2),0,pi), PHC(0,-1,2*pi))
        self.assertEqualPHC(dd2.drive(PHC(1,0,pi/2),0,pi/2), PHC(sqrt(2)/2,sqrt(2)/2,3*pi/4))
        self.assertEqualPHC(dd2.drive(PHC(0+1,-1+2,0),0,pi), PHC(1+1,0+2,pi/2))
        self.assertEqualPHC(dd2.drive(PHC(1+3,0+4,pi/2),0,pi), PHC(0+3,1+4,pi))
        self.assertEqualPHC(dd2.drive(PHC(0+5,1+6,pi),0,pi), PHC(-1+5,0+6,3*pi/2))
        self.assertEqualPHC(dd2.drive(PHC(-1+7,0+8,3*pi/2),0,pi), PHC(0+7,-1+8,2*pi))
        self.assertEqualPHC(dd2.drive(PHC(1+9,0+10,pi/2),0,pi/2), PHC(sqrt(2)/2+9,sqrt(2)/2+10,3*pi/4))
    def test_rotate_about_left_wheel_clockwise(self):
        dd1 = DD(1)
        self.assertEqualPHC(dd1.drive(PHC(0,-0.5,0),0,-pi/2), PHC(-0.5,0,-pi/2))
        self.assertEqualPHC(dd1.drive(PHC(0.5,0,pi/2),0,-pi/2), PHC(0,-0.5,0))
        self.assertEqualPHC(dd1.drive(PHC(0,0.5,pi),0,-pi/2), PHC(0.5,0,pi/2))
        self.assertEqualPHC(dd1.drive(PHC(-0.5,0,3*pi/2),0,-pi/2), PHC(0,0.5,pi))
        self.assertEqualPHC(dd1.drive(PHC(0.5,0,pi/2),0,-pi/4), PHC(sqrt(2)/4,-sqrt(2)/4,pi/4))
        self.assertEqualPHC(dd1.drive(PHC(0+1,-0.5+2,0),0,-pi/2), PHC(-0.5+1,0+2,-pi/2))
        self.assertEqualPHC(dd1.drive(PHC(0.5+3,0+4,pi/2),0,-pi/2), PHC(0+3,-0.5+4,0))
        self.assertEqualPHC(dd1.drive(PHC(0+5,0.5+6,pi),0,-pi/2), PHC(0.5+5,0+6,pi/2))
        self.assertEqualPHC(dd1.drive(PHC(-0.5+7,0+8,3*pi/2),0,-pi/2), PHC(0+7,0.5+8,pi))
        self.assertEqualPHC(dd1.drive(PHC(0.5+9,0+10,pi/2),0,-pi/4), PHC(sqrt(2)/4+9,-sqrt(2)/4+10,pi/4))
        dd2 = DD(2)
        self.assertEqualPHC(dd2.drive(PHC(0,-1,0),0,-pi), PHC(-1,0,-pi/2))
        self.assertEqualPHC(dd2.drive(PHC(1,0,pi/2),0,-pi), PHC(0,-1,0))
        self.assertEqualPHC(dd2.drive(PHC(0,1,pi),0,-pi), PHC(1,0,pi/2))
        self.assertEqualPHC(dd2.drive(PHC(-1,0,3*pi/2),0,-pi), PHC(0,1,pi))
        self.assertEqualPHC(dd2.drive(PHC(1,0,pi/2),0,-pi/2), PHC(sqrt(2)/2,-sqrt(2)/2,pi/4))
        self.assertEqualPHC(dd2.drive(PHC(0+1,-1+2,0),0,-pi), PHC(-1+1,0+2,-pi/2))
        self.assertEqualPHC(dd2.drive(PHC(1+3,0+4,pi/2),0,-pi), PHC(0+3,-1+4,0))
        self.assertEqualPHC(dd2.drive(PHC(0+5,1+6,pi),0,-pi), PHC(1+5,0+6,pi/2))
        self.assertEqualPHC(dd2.drive(PHC(-1+7,0+8,3*pi/2),0,-pi), PHC(0+7,1+8,pi))
        self.assertEqualPHC(dd2.drive(PHC(1+9,0+10,pi/2),0,-pi/2), PHC(sqrt(2)/2+9,-sqrt(2)/2+10,pi/4))
    def test_rotate_about_right_wheel_clockwise(self):
        # right_travel == 0: robot pivots about its stationary right wheel.
        dd1 = DD(1)
        self.assertEqualPHC(dd1.drive(PHC(0,0.5,0),pi/2,0), PHC(0.5,0,-pi/2))
        self.assertEqualPHC(dd1.drive(PHC(-0.5,0,pi/2),pi/2,0), PHC(0,0.5,0))
        self.assertEqualPHC(dd1.drive(PHC(0,-0.5,pi),pi/2,0), PHC(-0.5,0,pi/2))
        self.assertEqualPHC(dd1.drive(PHC(0.5,0,-pi/2),pi/2,0), PHC(0,-0.5,-pi))
        self.assertEqualPHC(dd1.drive(PHC(-0.5,0,pi/2),pi/4,0), PHC(-sqrt(2)/4,sqrt(2)/4,pi/4))
        self.assertEqualPHC(dd1.drive(PHC(0+1,0+2.5,0),pi/2,0), PHC(0.5+1,0+2,-pi/2))
        self.assertEqualPHC(dd1.drive(PHC(-0.5+3,0+4,pi/2),pi/2,0), PHC(0+3,0+4.5,0))
        self.assertEqualPHC(dd1.drive(PHC(0+5,-0.5+6,pi),pi/2,0), PHC(-0.5+5,0+6,pi/2))
        self.assertEqualPHC(dd1.drive(PHC(0.5+7,0+8,-pi/2),pi/2,0), PHC(0+7,-0.5+8,-pi))
        self.assertEqualPHC(dd1.drive(PHC(-0.5+9,0+10,pi/2),pi/4,0), PHC(-sqrt(2)/4+9,sqrt(2)/4+10,pi/4))
        dd2 = DD(2)
        self.assertEqualPHC(dd2.drive(PHC(0,1,0),pi,0), PHC(1,0,-pi/2))
        self.assertEqualPHC(dd2.drive(PHC(-1,0,pi/2),pi,0), PHC(0,1,0))
        self.assertEqualPHC(dd2.drive(PHC(0,-1,pi),pi,0), PHC(-1,0,pi/2))
        self.assertEqualPHC(dd2.drive(PHC(1,0,-pi/2),pi,0), PHC(0,-1,-pi))
        self.assertEqualPHC(dd2.drive(PHC(-1,0,pi/2),pi/2,0), PHC(-sqrt(2)/2,sqrt(2)/2,pi/4))
        self.assertEqualPHC(dd2.drive(PHC(0+1,1+2,0),pi,0), PHC(1+1,0+2,-pi/2))
        self.assertEqualPHC(dd2.drive(PHC(-1+3,0+4,pi/2),pi,0), PHC(0+3,1+4,0))
        self.assertEqualPHC(dd2.drive(PHC(0+5,-1+6,pi),pi,0), PHC(-1+5,0+6,pi/2))
        self.assertEqualPHC(dd2.drive(PHC(1+7,0+8,-pi/2),pi,0), PHC(0+7,-1+8,-pi))
        self.assertEqualPHC(dd2.drive(PHC(-1+9,0+10,pi/2),pi/2,0), PHC(-sqrt(2)/2+9,sqrt(2)/2+10,pi/4))
    def test_rotate_about_right_wheel_counterclockwise(self):
        dd1 = DD(1)
        self.assertEqualPHC(dd1.drive(PHC(0,0.5,0),-pi/2,0), PHC(-0.5,0,pi/2))
        self.assertEqualPHC(dd1.drive(PHC(-0.5,0,pi/2),-pi/2,0), PHC(0,-0.5,pi))
        self.assertEqualPHC(dd1.drive(PHC(0,-0.5,pi),-pi/2,0), PHC(0.5,0,3*pi/2))
        self.assertEqualPHC(dd1.drive(PHC(0.5,0,-pi/2),-pi/2,0), PHC(0,0.5,0))
        self.assertEqualPHC(dd1.drive(PHC(-0.5,0,pi/2),-pi/4,0), PHC(-sqrt(2)/4,-sqrt(2)/4,3*pi/4))
        self.assertEqualPHC(dd1.drive(PHC(0+1,0.5+2,0),-pi/2,0), PHC(-0.5+1,0+2,pi/2))
        self.assertEqualPHC(dd1.drive(PHC(-0.5+3,0+4,pi/2),-pi/2,0), PHC(0+3,-0.5+4,pi))
        self.assertEqualPHC(dd1.drive(PHC(0+5,-0.5+6,pi),-pi/2,0), PHC(0.5+5,0+6,3*pi/2))
        self.assertEqualPHC(dd1.drive(PHC(0.5+7,0+8,-pi/2),-pi/2,0), PHC(0+7,0.5+8,0))
        self.assertEqualPHC(dd1.drive(PHC(-0.5+9,0+10,pi/2),-pi/4,0), PHC(-sqrt(2)/4+9,-sqrt(2)/4+10,3*pi/4))
        dd2 = DD(2)
        self.assertEqualPHC(dd2.drive(PHC(0,1,0),-pi,0), PHC(-1,0,pi/2))
        self.assertEqualPHC(dd2.drive(PHC(-1,0,pi/2),-pi,0), PHC(0,-1,pi))
        self.assertEqualPHC(dd2.drive(PHC(0,-1,pi),-pi,0), PHC(1,0,3*pi/2))
        self.assertEqualPHC(dd2.drive(PHC(1,0,-pi/2),-pi,0), PHC(0,1,0))
        self.assertEqualPHC(dd2.drive(PHC(-1,0,pi/2),-pi/2,0), PHC(-sqrt(2)/2,-sqrt(2)/2,3*pi/4))
        self.assertEqualPHC(dd2.drive(PHC(0+1,1+2,0),-pi,0), PHC(-1+1,0+2,pi/2))
        self.assertEqualPHC(dd2.drive(PHC(-1+3,0+4,pi/2),-pi,0), PHC(0+3,-1+4,pi))
        self.assertEqualPHC(dd2.drive(PHC(0+5,-1+6,pi),-pi,0), PHC(1+5,0+6,3*pi/2))
        self.assertEqualPHC(dd2.drive(PHC(1+7,0+8,-pi/2),-pi,0), PHC(0+7,1+8,0))
        self.assertEqualPHC(dd2.drive(PHC(-1+9,0+10,pi/2),-pi/2,0), PHC(-sqrt(2)/2+9,-sqrt(2)/2+10,3*pi/4))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 49.045187
| 107
| 0.611761
| 5,037
| 24,964
| 2.951161
| 0.044868
| 0.050858
| 0.053885
| 0.097679
| 0.826976
| 0.800942
| 0.78816
| 0.771275
| 0.761319
| 0.734612
| 0
| 0.110952
| 0.161312
| 24,964
| 508
| 108
| 49.141732
| 0.599035
| 0.052476
| 0
| 0.164141
| 0
| 0
| 0.006519
| 0
| 0
| 0
| 0
| 0
| 0.565657
| 0
| null | null | 0.002525
| 0.005051
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6029b9d38644c9147e22c6268f4926b133f26c89
| 1,510
|
py
|
Python
|
necropsy/models.py
|
rodrigoncalves/anato-hub
|
bb6c5a39bfbc247476a6ef4fe410c972b5e561c4
|
[
"MIT"
] | null | null | null |
necropsy/models.py
|
rodrigoncalves/anato-hub
|
bb6c5a39bfbc247476a6ef4fe410c972b5e561c4
|
[
"MIT"
] | null | null | null |
necropsy/models.py
|
rodrigoncalves/anato-hub
|
bb6c5a39bfbc247476a6ef4fe410c972b5e561c4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models
from exam.models import Exam, ReportStatus
class NecropsyStatus(models.Model):
    """Lookup table of status labels a Necropsy record can be in."""
    # Human-readable status label.
    description = models.CharField(max_length=50)
class Necropsy(models.Model):
    """Necropsy record attached to an Exam.

    All descriptive fields are optional free text (null=True, blank=True).
    """
    examination_time = models.TimeField(null=True, blank=True)
    clinical_information = models.TextField(null=True, blank=True)
    main_disease = models.TextField(null=True, blank=True)
    consequential_final_disease = models.TextField(null=True, blank=True)
    contributors_disease = models.TextField(null=True, blank=True)
    consequential_disease = models.TextField(null=True, blank=True)
    other_diseases = models.TextField(null=True, blank=True)
    note = models.TextField(null=True, blank=True)
    footer = models.TextField(null=True, blank=True)
    # default=1 presumes a NecropsyStatus row with pk 1 is pre-seeded --
    # TODO confirm a fixture/migration guarantees it.  No on_delete
    # argument: pre-2.0 Django style (CASCADE was the implicit default).
    status = models.ForeignKey(NecropsyStatus, default=1)
    exam = models.ForeignKey(Exam)
class NecropsyReport(models.Model):
    """Report snapshot of a Necropsy, tracked with its own ReportStatus.

    Mirrors the free-text fields of Necropsy (minus examination_time).
    """
    clinical_information = models.TextField(null=True, blank=True)
    main_disease = models.TextField(null=True, blank=True)
    consequential_final_disease = models.TextField(null=True, blank=True)
    contributors_disease = models.TextField(null=True, blank=True)
    consequential_disease = models.TextField(null=True, blank=True)
    other_diseases = models.TextField(null=True, blank=True)
    note = models.TextField(null=True, blank=True)
    footer = models.TextField(null=True, blank=True)
    # default=1 presumes a ReportStatus row with pk 1 exists -- TODO confirm.
    # No on_delete argument: pre-2.0 Django style (CASCADE implied).
    status = models.ForeignKey(ReportStatus, default=1)
    necropsy = models.ForeignKey(Necropsy)
| 41.944444
| 73
| 0.756291
| 186
| 1,510
| 6.053763
| 0.225806
| 0.120782
| 0.19627
| 0.256661
| 0.701599
| 0.701599
| 0.701599
| 0.701599
| 0.701599
| 0.701599
| 0
| 0.003817
| 0.13245
| 1,510
| 35
| 74
| 43.142857
| 0.855725
| 0.013907
| 0
| 0.592593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
60317146635eeaae800b5fae8827d40a9e2e4b9f
| 3,419
|
py
|
Python
|
prs_lib/draft.py
|
Press-One/prs-lib-py
|
5f088d58993a93bee660a27579966aa015606c9a
|
[
"MIT"
] | 1
|
2019-07-27T06:41:18.000Z
|
2019-07-27T06:41:18.000Z
|
prs_lib/draft.py
|
Press-One/prs-lib-py
|
5f088d58993a93bee660a27579966aa015606c9a
|
[
"MIT"
] | 2
|
2019-07-27T07:47:05.000Z
|
2019-08-28T13:24:40.000Z
|
prs_lib/draft.py
|
Press-One/prs-lib-py
|
5f088d58993a93bee660a27579966aa015606c9a
|
[
"MIT"
] | 1
|
2019-07-03T14:23:29.000Z
|
2019-07-03T14:23:29.000Z
|
from .request import request
from . import validator
# Public API of this module.
__all__ = ['Draft', ]
class Draft:
    """CRUD client for PRS draft resources.

    Every public method issues an HTTP call via ``request`` against the
    configured host and returns whatever ``request`` returns.
    """

    def __init__(self, config):
        """
        :param config: PRSConfig
        """
        self.config = config

    @staticmethod
    def _draft_to_data(draft):
        """Validate *draft* and build the request payload.

        Shared by ``create`` and ``update``; the two previously carried
        byte-identical copies of this validation/translation code.

        :param draft: dict
            {
                'title', str,
                'content': str,
                'mime_type': str,
                'source': str,
                'origin_url': str,
                'project_id': str
            }
            'title', 'content' and 'mime_type' are required; the snake_case
            optional keys are included (camelCased) only when truthy.
        :return: dict ready to be sent as the request body
        """
        keys = ['title', 'content', 'mime_type', ]
        validator.check_dict_and_assert('draft', draft, keys)
        data = {
            'title': draft['title'],
            'content': draft['content'],
            'mimeType': draft['mime_type'],
        }
        if draft.get('source'):
            data['source'] = draft['source']
        if draft.get('origin_url'):
            data['originUrl'] = draft['origin_url']
        if draft.get('project_id'):
            data['projectId'] = draft['project_id']
        return data

    def create(self, draft):
        """Create a new draft.

        :param draft: dict, see ``_draft_to_data`` for the accepted keys
        """
        data = self._draft_to_data(draft)
        return request(
            self.config.host,
            method='POST',
            path='/drafts',
            data=data,
            auth_opts=self.config.get_auth_opts(),
            debug=self.config.debug,
        )

    def update(self, _id, draft):
        """Update an existing draft.

        :param _id: str, draft id
        :param draft: dict, see ``_draft_to_data`` for the accepted keys
        """
        validator.assert_exc(_id, '_id cannot be null')
        data = self._draft_to_data(draft)
        return request(
            self.config.host,
            method='PUT',
            path=f'/drafts/{_id}',
            data=data,
            auth_opts=self.config.get_auth_opts(),
            debug=self.config.debug,
        )

    def delete(self, _id):
        """Delete a draft.

        :param _id: str, draft id
        """
        validator.assert_exc(_id, '_id cannot be null')
        return request(
            self.config.host,
            method='DELETE',
            path=f'/drafts/{_id}',
            auth_opts=self.config.get_auth_opts(),
            debug=self.config.debug,
        )

    def get_by_id(self, _id):
        """Fetch a single draft.

        :param _id: str, draft id
        """
        validator.assert_exc(_id, '_id cannot be null')
        return request(
            self.config.host,
            method='GET',
            path=f'/drafts/{_id}',
            auth_opts=self.config.get_auth_opts(),
            debug=self.config.debug,
        )

    def get_drafts(self):
        """List all drafts visible to the authenticated caller."""
        return request(
            self.config.host,
            method='GET',
            path='/drafts',
            auth_opts=self.config.get_auth_opts(),
            debug=self.config.debug,
        )
| 28.491667
| 61
| 0.473238
| 341
| 3,419
| 4.533724
| 0.155425
| 0.109961
| 0.03881
| 0.058215
| 0.863519
| 0.852523
| 0.852523
| 0.852523
| 0.802717
| 0.802717
| 0
| 0
| 0.390758
| 3,419
| 119
| 62
| 28.731092
| 0.742199
| 0.134542
| 0
| 0.734177
| 0
| 0
| 0.154045
| 0
| 0
| 0
| 0
| 0
| 0.063291
| 1
| 0.075949
| false
| 0
| 0.025316
| 0.012658
| 0.177215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
605f550a2e7e2d640797e5537d656d6b76d65994
| 62
|
py
|
Python
|
banzai/setup_package.py
|
gmbrandt/banzai
|
af6ddb529e8c35eaa87abf67372160f2dd99050b
|
[
"BSD-3-Clause"
] | null | null | null |
banzai/setup_package.py
|
gmbrandt/banzai
|
af6ddb529e8c35eaa87abf67372160f2dd99050b
|
[
"BSD-3-Clause"
] | null | null | null |
banzai/setup_package.py
|
gmbrandt/banzai
|
af6ddb529e8c35eaa87abf67372160f2dd99050b
|
[
"BSD-3-Clause"
] | null | null | null |
def get_package_data():
    """Return the package-data mapping consumed by the build helpers.

    Ships everything under the package's ``data/`` directory.
    """
    data_glob = 'data/*'
    return dict(package_data=data_glob)
| 20.666667
| 37
| 0.66129
| 8
| 62
| 4.75
| 0.625
| 0.578947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145161
| 62
| 2
| 38
| 31
| 0.716981
| 0
| 0
| 0
| 0
| 0
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
6075be328810357a9c185953115c50a466e62c20
| 9,647
|
py
|
Python
|
tests/assemblers/test_svm.py
|
yarix/m2cgen
|
f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546
|
[
"MIT"
] | 1
|
2021-05-28T06:59:21.000Z
|
2021-05-28T06:59:21.000Z
|
tests/assemblers/test_svm.py
|
yarix/m2cgen
|
f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546
|
[
"MIT"
] | null | null | null |
tests/assemblers/test_svm.py
|
yarix/m2cgen
|
f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
from sklearn import svm
from lightning.classification import KernelSVC
from m2cgen import assemblers, ast
from tests import utils
def test_rbf_kernel():
    """RBF-kernel SVC assembles to intercept + sum(coef_i * exp-kernel_i)."""
    estimator = svm.SVC(kernel="rbf", random_state=1, gamma=2.0)
    # Two one-feature samples -> two support vectors with values 1. and 2.
    estimator.fit([[1], [2]], [1, 2])
    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()
    kernels = [_rbf_kernel_ast(estimator, 1.), _rbf_kernel_ast(estimator, 2.)]
    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_, kernels)
    assert utils.cmp_exprs(actual, expected)
def test_linear_kernel():
    """Linear kernel is a plain dot product: sup_vec * feature."""
    estimator = svm.SVC(kernel="linear", random_state=1)
    estimator.fit([[1], [2]], [1, 2])
    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        # Expected per-support-vector kernel subtree.
        return ast.BinNumExpr(
            ast.NumVal(sup_vec_value),
            ast.FeatureRef(0),
            ast.BinNumOpType.MUL)

    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(2.0)])
    assert utils.cmp_exprs(actual, expected)
def test_sigmoid_kernel():
    """Sigmoid kernel assembles to tanh(gamma * (sup_vec * x) + coef0)."""
    estimator = svm.SVC(kernel="sigmoid", random_state=1, gamma=2.0)
    estimator.fit([[1], [2]], [1, 2])
    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        # tanh(gamma * (sup_vec * x) + 0.0); 0.0 is sklearn's default coef0.
        return ast.TanhExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(estimator.gamma),
                    ast.BinNumExpr(
                        ast.NumVal(sup_vec_value),
                        ast.FeatureRef(0),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.MUL),
                ast.NumVal(0.0),
                ast.BinNumOpType.ADD))

    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(2.0)])
    assert utils.cmp_exprs(actual, expected)
def test_poly_kernel():
    """Polynomial kernel assembles to (gamma * (sup_vec * x) + coef0)**degree."""
    estimator = svm.SVC(kernel="poly", random_state=1, gamma=2.0, degree=2)
    estimator.fit([[1], [2]], [1, 2])
    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        # (gamma * (sup_vec * x) + 0.0) ** degree; 0.0 is the default coef0.
        return ast.PowExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(estimator.gamma),
                    ast.BinNumExpr(
                        ast.NumVal(sup_vec_value),
                        ast.FeatureRef(0),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.MUL),
                ast.NumVal(0.0),
                ast.BinNumOpType.ADD),
            ast.NumVal(estimator.degree))

    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(2.0)])
    assert utils.cmp_exprs(actual, expected)
def test_cosine_kernel():
    """Lightning cosine kernel: dot product divided by the feature norm,
    with a guard substituting 1.0 when the norm is exactly zero."""
    estimator = KernelSVC(kernel="cosine", random_state=1, gamma=2.0)
    estimator.fit(np.array([[1], [2]]), [1, 2])
    assembler = assemblers.LightningSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        # sqrt(x*x) reused in both the zero test and the divisor.
        feature_norm = ast.SqrtExpr(
            ast.BinNumExpr(
                ast.FeatureRef(0),
                ast.FeatureRef(0),
                ast.BinNumOpType.MUL),
            to_reuse=True)
        return ast.BinNumExpr(
            ast.BinNumExpr(
                ast.NumVal(sup_vec_value),
                ast.FeatureRef(0),
                ast.BinNumOpType.MUL),
            ast.IfExpr(
                ast.CompExpr(
                    feature_norm,
                    ast.NumVal(0.0),
                    ast.CompOpType.EQ),
                ast.NumVal(1.0),
                feature_norm),
            ast.BinNumOpType.DIV)

    # NOTE(review): both expected kernels use sup_vec_value 1.0 here --
    # presumably the support vectors are normalized; confirm against the
    # lightning assembler before changing.
    expected = _create_expected_single_output_ast(
        estimator.coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(1.0)])
    assert utils.cmp_exprs(actual, expected)
@pytest.mark.xfail(raises=ValueError, strict=True)
def test_unknown_kernel():
    """A callable (custom) kernel is unsupported: assemble() raises ValueError."""
    model = svm.SVC(kernel=lambda x, y: np.transpose(x) * y)
    model.fit([[1], [2]], [1, 2])
    assemblers.SklearnSVMModelAssembler(model).assemble()
def test_multi_class_rbf_kernel():
    """Three-class SVC yields a 3-entry vector, one per one-vs-one pair
    ((1,2), (1,3), (2,3)); each reusable kernel appears in two entries."""
    estimator = svm.SVC(kernel="rbf", random_state=1, gamma=2.0)
    estimator.fit([[1], [2], [3]], [1, 2, 3])
    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()
    kernels = [
        _rbf_kernel_ast(estimator, float(i), to_reuse=True)
        for i in range(1, 4)
    ]
    expected = ast.VectorVal([
        ast.BinNumExpr(
            ast.BinNumExpr(
                ast.NumVal(0.0),
                ast.BinNumExpr(
                    kernels[1],
                    ast.NumVal(-1.0),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD),
            ast.BinNumExpr(
                kernels[0],
                ast.NumVal(1.0),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD),
        ast.BinNumExpr(
            ast.BinNumExpr(
                ast.NumVal(0.0),
                ast.BinNumExpr(
                    kernels[2],
                    ast.NumVal(-1.0),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD),
            ast.BinNumExpr(
                kernels[0],
                ast.NumVal(1.0),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD),
        ast.BinNumExpr(
            ast.BinNumExpr(
                ast.NumVal(0.0),
                ast.BinNumExpr(
                    kernels[2],
                    ast.NumVal(-1.0),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD),
            ast.BinNumExpr(
                kernels[1],
                ast.NumVal(1.0),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD)])
    assert utils.cmp_exprs(actual, expected)
def test_lightning_multi_class_rbf_kernel():
    """Lightning multi-class KernelSVC is one-vs-rest: each of the three
    outputs sums all three kernels with that class's fitted coefficients
    (hard-coded values below come from random_state=1)."""
    estimator = KernelSVC(kernel="rbf", random_state=1, gamma=2.0)
    estimator.fit(np.array([[1], [2], [3]]), np.array([1, 2, 3]))
    assembler = assemblers.LightningSVMModelAssembler(estimator)
    actual = assembler.assemble()
    kernels = [
        _rbf_kernel_ast(estimator, float(i))
        for i in range(1, 4)
    ]
    expected = ast.VectorVal([
        ast.BinNumExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(0.0),
                    ast.BinNumExpr(
                        kernels[0],
                        ast.NumVal(0.5342246289),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.ADD),
                ast.BinNumExpr(
                    kernels[1],
                    ast.NumVal(-0.5046204480),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD),
            ast.BinNumExpr(
                kernels[2],
                ast.NumVal(-0.4659431306),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD),
        ast.BinNumExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(0.0),
                    ast.BinNumExpr(
                        kernels[0],
                        ast.NumVal(-0.5386765707),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.ADD),
                ast.BinNumExpr(
                    kernels[1],
                    ast.NumVal(0.5729019463),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD),
            ast.BinNumExpr(
                kernels[2],
                ast.NumVal(-0.5386765707),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD),
        ast.BinNumExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(0.0),
                    ast.BinNumExpr(
                        kernels[0],
                        ast.NumVal(-0.4659431306),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.ADD),
                ast.BinNumExpr(
                    kernels[1],
                    ast.NumVal(-0.5046204480),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD),
            ast.BinNumExpr(
                kernels[2],
                ast.NumVal(0.5342246289),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD)])
    assert utils.cmp_exprs(actual, expected)
def _create_expected_single_output_ast(coef, intercept, kernels_ast):
    """Build the expected single-output SVM decision AST.

    The tree is ``((intercept + k0 * c0) + k1 * c1)`` where ``k0``/``k1`` are
    the first two kernel sub-expressions and ``c0``/``c1`` the matching
    coefficients from the first coefficient row.
    """
    first_term = ast.BinNumExpr(
        kernels_ast[0],
        ast.NumVal(coef[0][0]),
        ast.BinNumOpType.MUL)
    second_term = ast.BinNumExpr(
        kernels_ast[1],
        ast.NumVal(coef[0][1]),
        ast.BinNumOpType.MUL)
    partial_sum = ast.BinNumExpr(
        ast.NumVal(intercept[0]),
        first_term,
        ast.BinNumOpType.ADD)
    return ast.BinNumExpr(partial_sum, second_term, ast.BinNumOpType.ADD)
def _rbf_kernel_ast(estimator, sup_vec_value, to_reuse=False):
    """Build ``exp(-gamma * (sup_vec_value - x[0]) ** 2)`` as an AST.

    ``gamma`` is taken from the fitted estimator; ``to_reuse`` is forwarded
    to the resulting ``ExpExpr`` node.
    """
    difference = ast.BinNumExpr(
        ast.NumVal(sup_vec_value),
        ast.FeatureRef(0),
        ast.BinNumOpType.SUB)
    squared_distance = ast.PowExpr(difference, ast.NumVal(2))
    scaled = ast.BinNumExpr(
        ast.NumVal(-estimator.gamma),
        squared_distance,
        ast.BinNumOpType.MUL)
    return ast.ExpExpr(scaled, to_reuse=to_reuse)
| 31.220065
| 78
| 0.531357
| 967
| 9,647
| 5.159255
| 0.105481
| 0.119864
| 0.093005
| 0.092604
| 0.84987
| 0.79996
| 0.782121
| 0.728202
| 0.719383
| 0.710964
| 0
| 0.03876
| 0.358142
| 9,647
| 308
| 79
| 31.321429
| 0.766957
| 0
| 0
| 0.756972
| 0
| 0
| 0.003317
| 0
| 0
| 0
| 0
| 0
| 0.027888
| 1
| 0.055777
| false
| 0
| 0.023904
| 0.01992
| 0.103586
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6083ce7263449fc2bf2b46ed36d9f1d786964ff2
| 124
|
py
|
Python
|
alerter/src/abstract/__init__.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 41
|
2019-08-23T12:40:42.000Z
|
2022-03-28T11:06:02.000Z
|
alerter/src/abstract/__init__.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 147
|
2019-08-30T22:09:48.000Z
|
2022-03-30T08:46:26.000Z
|
alerter/src/abstract/__init__.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 3
|
2019-09-03T21:12:28.000Z
|
2021-08-18T14:27:56.000Z
|
from .component import Component
from .publisher import PublisherComponent
from .publisher import QueuingPublisherComponent
| 31
| 48
| 0.879032
| 12
| 124
| 9.083333
| 0.5
| 0.238532
| 0.348624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 124
| 3
| 49
| 41.333333
| 0.973214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
60d1bb920fb35f924bdc7f485612fda7d2f97739
| 31
|
py
|
Python
|
src/pypegasus_example/mod3.py
|
pegasus-isi/pypegasus-example
|
c4adf202dae8133073e8616a99bdf4c4baef1b4c
|
[
"Apache-2.0"
] | null | null | null |
src/pypegasus_example/mod3.py
|
pegasus-isi/pypegasus-example
|
c4adf202dae8133073e8616a99bdf4c4baef1b4c
|
[
"Apache-2.0"
] | null | null | null |
src/pypegasus_example/mod3.py
|
pegasus-isi/pypegasus-example
|
c4adf202dae8133073e8616a99bdf4c4baef1b4c
|
[
"Apache-2.0"
] | null | null | null |
def y():
    """Print the digit '3' repeated one thousand times on a single line."""
    print(1000 * "3")
| 10.333333
| 21
| 0.451613
| 5
| 31
| 2.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 0.290323
| 31
| 2
| 22
| 15.5
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
60f013dbde3e664924f17e2811dd85906a591d8c
| 4,662
|
py
|
Python
|
hall-of-fame/round-02/07_yejik1002/myunit.py
|
hansol000808/cau-osp2020-game
|
6912d3e96e5d2625a98e8d5e95ee8b2fe1178584
|
[
"Apache-2.0"
] | 17
|
2020-12-08T14:33:01.000Z
|
2020-12-17T15:50:59.000Z
|
hall-of-fame/round-02/07_yejik1002/myunit.py
|
hansol000808/cau-osp2020-game
|
6912d3e96e5d2625a98e8d5e95ee8b2fe1178584
|
[
"Apache-2.0"
] | 2
|
2020-11-26T12:21:53.000Z
|
2020-12-02T07:07:24.000Z
|
hall-of-fame/round-02/07_yejik1002/myunit.py
|
hansol000808/cau-osp2020-game
|
6912d3e96e5d2625a98e8d5e95ee8b2fe1178584
|
[
"Apache-2.0"
] | 79
|
2020-11-26T00:43:18.000Z
|
2020-12-18T14:18:37.000Z
|
from loa.unit import Unit
class _YUnitBase(Unit):
    """Shared stat block for the YUnit variants.

    The original file repeated the exact same class body verbatim for
    YUnit through YUnit10; the stats and constructor now live here once.
    Values are unchanged.
    """

    HP = 32.100000005  # Hit Points (health points)
    ATT = 0            # Attack
    ARM = 11.93333333  # Armor
    EVS = 0            # Evasion

    def __init__(self, team, name, pos):
        # Look the stats up on the class so the values stay in one place.
        cls = __class__
        super().__init__(team,
                         name,
                         pos,
                         hp=cls.HP,
                         att=cls.ATT,
                         arm=cls.ARM,
                         evs=cls.EVS)


class YUnit(_YUnitBase):
    pass


class YUnit2(_YUnitBase):
    pass


class YUnit3(_YUnitBase):
    pass


class YUnit4(_YUnitBase):
    pass


class YUnit5(_YUnitBase):
    pass


class YUnit6(_YUnitBase):
    pass


class YUnit7(_YUnitBase):
    pass


class YUnit8(_YUnitBase):
    pass


class YUnit9(_YUnitBase):
    pass


class YUnit10(_YUnitBase):
    pass
| 27.423529
| 56
| 0.38417
| 445
| 4,662
| 3.755056
| 0.083146
| 0.095751
| 0.131658
| 0.101735
| 0.948534
| 0.948534
| 0.948534
| 0.948534
| 0.948534
| 0.948534
| 0
| 0.109489
| 0.529816
| 4,662
| 169
| 57
| 27.585799
| 0.652828
| 0.096311
| 0
| 0.921986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070922
| false
| 0
| 0.007092
| 0
| 0.432624
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
716e225daa1d23f8e698c89326e59e99bdd8b301
| 16,513
|
py
|
Python
|
chat/tests_views.py
|
helmetwearer/dating-app
|
8222de25ef3fed9f5ca5e826f6da72243a1b2a1d
|
[
"MIT"
] | null | null | null |
chat/tests_views.py
|
helmetwearer/dating-app
|
8222de25ef3fed9f5ca5e826f6da72243a1b2a1d
|
[
"MIT"
] | null | null | null |
chat/tests_views.py
|
helmetwearer/dating-app
|
8222de25ef3fed9f5ca5e826f6da72243a1b2a1d
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Conversations, Messages, Winks, Reject
from django.urls import reverse
from django.contrib.auth import authenticate, login
from django.contrib import auth
# from django.http import HttpResponsePermanentRedirect
# from django.core.files.uploadedfile import SimpleUploadedFile
from checkout.models import Subscription
from profiles.models import Profile
from django.utils import six
import json
# Create your tests here.
class TestViews(TestCase):
    """View tests for the chat app: chat pages, winks/views pages and the
    AJAX endpoints (message check, wink, reject, new message, read receipts).

    NOTE(review): ``django.utils.six`` was removed in Django 3.0 — these
    tests presumably target Django 2.x; verify before upgrading.
    """

    def setUp(self):
        # Two users: 'foo' logs in for every test, 'foo3' is the peer.
        self.user = User.objects.create_user(username='foo',
                                             email='foo@test.com',
                                             password='bar')
        # Assignment is unused, but the create_user side effect is required.
        test_user_two = User.objects.create_user(username='foo3',
                                                 email='foo3@test.com',
                                                 password='bar')
        """
        A customer in Stripe under id 'cus_FMdSeBRaEVZlmh' is used
        """

    # Test get chat page
    def test_chat_page(self):
        self.client.login(username='foo', password='bar')
        current_profile = Profile.objects.get(user_id__username='foo')
        receiver_profile = User.objects.get(username='foo3')
        # Make user premium
        current_profile.is_premium = True
        current_profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_profile.user.id)
        subscription.save()
        # Make conversation
        conversation = Conversations()
        conversation.save()
        conversation.participants.add(current_profile.user.id)
        conversation.participants.add(receiver_profile.id)
        page = self.client.get('/chat/%s' % conversation.id)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'chat.html')

    # Test post chat page creates message and redirects back to chat
    def test_post_chat_page(self):
        self.client.login(username='foo', password='bar')
        current_profile = Profile.objects.get(user_id__username='foo')
        receiver_profile = User.objects.get(username='foo3')
        # Make user premium
        current_profile.is_premium = True
        current_profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_profile.user.id)
        subscription.save()
        # Make conversation
        conversation = Conversations()
        conversation.save()
        conversation.participants.add(current_profile.user.id)
        conversation.participants.add(receiver_profile.id)
        page = self.client.post(reverse('chat', kwargs={'id':conversation.id}), {
            'message_content': 'foo'})
        self.assertTrue(Messages.objects.get(message_content='foo'))
        self.assertRedirects(page, '/chat/%s' % conversation.id, status_code=302)

    # Test get chat home page (no previous conversations)
    def test_get_chat_home_no_previous_conversations(self):
        self.client.login(username='foo', password='bar')
        current_profile = Profile.objects.get(user_id__username='foo')
        # Make user premium
        current_profile.is_premium = True
        current_profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_profile.user.id)
        subscription.save()
        page = self.client.get('/chat/home/')
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'chat_home.html')

    # Test get chat home page (previous conversations) (received message)
    def test_get_chat_home_previous_conversations_received_message(self):
        self.client.login(username='foo', password='bar')
        current_profile = Profile.objects.get(user_id__username='foo')
        receiver_profile = Profile.objects.get(user_id__username='foo3')
        # Make user premium
        current_profile.is_premium = True
        current_profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_profile.user.id)
        subscription.save()
        # Create conversation and messages
        conversation = Conversations()
        conversation.save()
        conversation.participants.add(current_profile.user.id)
        conversation.participants.add(receiver_profile.user.id)
        message = Messages(message_content="foo", is_read=False, receiver_id=current_profile.user.id, sender_id=receiver_profile.user.id, conversation_id=conversation.id)
        message.save()
        page = self.client.get('/chat/home/')
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'chat_home.html')

    # Test get chat home page (previous conversations) (sent message)
    def test_get_chat_home_previous_conversations_sent_message(self):
        self.client.login(username='foo', password='bar')
        current_profile = Profile.objects.get(user_id__username='foo')
        receiver_profile = Profile.objects.get(user_id__username='foo3')
        # Make user premium
        current_profile.is_premium = True
        current_profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_profile.user.id)
        subscription.save()
        # Create conversation and messages
        conversation = Conversations()
        conversation.save()
        conversation.participants.add(current_profile.user.id)
        conversation.participants.add(receiver_profile.user.id)
        message = Messages(message_content="foo", is_read=False, receiver_id=receiver_profile.user.id, sender_id=current_profile.user.id, conversation_id=conversation.id)
        message.save()
        page = self.client.get('/chat/home/')
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'chat_home.html')

    # Test get winks page
    def test_get_winks(self):
        self.client.login(username='foo', password='bar')
        page = self.client.get('/chat/winks/')
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'winks.html')

    # Test get views page
    def test_get_views(self):
        self.client.login(username='foo', password='bar')
        current_profile = Profile.objects.get(user_id__username='foo')
        # Make user premium
        current_profile.is_premium = True
        current_profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_profile.user.id)
        subscription.save()
        page = self.client.get('/chat/views/')
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'views.html')

    # Test message check returns JSON data
    def test_ajax_new_message_check(self):
        self.client.login(username='foo', password='bar')
        current_profile = Profile.objects.get(user_id__username='foo')
        receiver_profile = User.objects.get(username='foo3')
        # Create conversation and messages
        conversation = Conversations()
        conversation.save()
        conversation.participants.add(current_profile.user.id)
        conversation.participants.add(receiver_profile.id)
        message = Messages(message_content="foo", is_read=False, receiver_id=current_profile.id, sender_id=receiver_profile.id, conversation_id=conversation.id)
        message.save()
        # Make user premium
        current_profile.is_premium = True
        current_profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_profile.user.id)
        subscription.save()
        page = self.client.get(reverse('new_message_check'), {
            'url_id': conversation.id}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        response_content = page.content
        # https://stackoverflow.com/questions/27472663/how-to-use-djangos-assertjsonequal-to-verify-response-of-view-returning-jsonres
        if six.PY3:
            response_content = str(response_content, encoding='utf8')
        self.assertJSONEqual(
            response_content,
            {'conversation' : True}
        )

    # Test wink returns correct JSON - last wink unread
    def test_ajax_wink_not_read(self):
        self.client.login(username='foo', password='bar')
        current_user = User.objects.get(username='foo')
        receiver_user = User.objects.get(username='foo3')
        # Create unread wink
        wink = Winks(receiver=receiver_user, sender=current_user)
        wink.save()
        page = self.client.get(reverse('wink'), {
            'receiver_id': receiver_user.id}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        response_content = page.content
        if six.PY3:
            response_content = str(response_content, encoding='utf8')
        self.assertJSONEqual(
            response_content,
            {'message' : "Member hasn't viewed your last wink yet"}
        )

    # Test wink returns correct JSON - no unread winks
    def test_ajax_wink_no_unread(self):
        self.client.login(username='foo', password='bar')
        receiver_profile = User.objects.get(username='foo3')
        page = self.client.get(reverse('wink'), {
            'receiver_id': receiver_profile.id}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        response_content = page.content
        if six.PY3:
            response_content = str(response_content, encoding='utf8')
        self.assertJSONEqual(
            response_content,
            {'message' : 'Wink successfully sent.'}
        )

    # Test reject creates reject record (as one is not yet created) and returns 204 response
    def test_ajax_reject(self):
        self.client.login(username='foo', password='bar')
        receiver_user = User.objects.get(username='foo3')
        current_user = User.objects.get(username='foo')
        # Record not yet created
        reject_record = Reject.objects.filter(sender_id=current_user, receiver_id=receiver_user).exists()
        self.assertFalse(reject_record)
        page = self.client.get(reverse('reject'), {
            'receiver_id': receiver_user.id}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        response_content = page.content
        if six.PY3:
            response_content = str(response_content, encoding='utf8')
        self.assertJSONEqual(
            response_content,
            {'message' : 'Member successfully skipped'}
        )

    # Test chat ajax returns JSON and creates conversation and message (conversation not yet exists)
    def test_ajax_message_no_conversation(self):
        self.client.login(username='foo', password='bar')
        receiver_user = User.objects.get(username='foo3')
        current_user = User.objects.get(username='foo')
        # Make user premium
        current_user.profile.is_premium = True
        current_user.profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_user.id)
        subscription.save()
        page = self.client.post(reverse('new_message'), {
            'message_receiver': receiver_user.id, 'message_content': 'foo'}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        response_content = page.content
        if six.PY3:
            response_content = str(response_content, encoding='utf8')
        message = Messages.objects.filter(message_content='foo', sender_id=current_user, receiver_id=receiver_user).exists()
        self.assertTrue(message)
        self.assertJSONEqual(
            response_content,
            {'message' : 'Message Successfully Sent'}
        )

    # Test chat ajax returns JSON and creates message (conversation already exists)
    def test_ajax_message_conversation_exists(self):
        self.client.login(username='foo', password='bar')
        receiver_user = User.objects.get(username='foo3')
        current_user = User.objects.get(username='foo')
        # Make user premium
        current_user.profile.is_premium = True
        current_user.profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_user.id)
        subscription.save()
        # Create conversation
        conversation = Conversations()
        conversation.save()
        conversation.participants.add(current_user.id)
        conversation.participants.add(receiver_user.id)
        conversation.save()
        page = self.client.post(reverse('new_message'), {
            'message_receiver': receiver_user.id, 'message_content': 'foo'}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        response_content = page.content
        if six.PY3:
            response_content = str(response_content, encoding='utf8')
        message = Messages.objects.filter(message_content='foo', sender_id=current_user, receiver_id=receiver_user).exists()
        self.assertTrue(message)
        self.assertJSONEqual(
            response_content,
            {'message' : 'Message Successfully Sent'}
        )

    # Test AJAX request reads all messages in conversation
    def test_ajax_read_messages(self):
        self.client.login(username='foo', password='bar')
        receiver_user = User.objects.get(username='foo3')
        current_user = User.objects.get(username='foo')
        # Create conversation and messages
        conversation = Conversations()
        conversation.save()
        conversation.participants.add(current_user.id)
        conversation.participants.add(receiver_user.id)
        message = Messages(message_content="foo", is_read=False, receiver_id=current_user.id, sender_id=receiver_user.id, conversation_id=conversation.id)
        message.save()
        page = self.client.get(reverse('read_messages'), {
            'url_id': 1}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        response_content = page.content
        if six.PY3:
            response_content = str(response_content, encoding='utf8')
        # Re-fetch to observe the is_read flag flipped by the view.
        message = Messages.objects.get(pk=message.id)
        self.assertEqual(message.is_read, True)
        self.assertJSONEqual(
            response_content,
            {'conversation' : False}
        )

    # Test ajax read wink returns 204 status code
    def test_ajax_read_wink(self):
        self.client.login(username='foo', password='bar')
        page = self.client.post(reverse('read_wink'), {
            'page': 1}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertEqual(page.status_code, 204)

    # Test ajax read view returns redirect directive (not premium)
    def test_ajax_read_view_not_premium(self):
        self.client.login(username='foo', password='bar')
        page = self.client.post(reverse('read_view'), {
            'page': 1}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        response_content = page.content
        if six.PY3:
            response_content = str(response_content, encoding='utf8')
        self.assertJSONEqual(
            response_content,
            {'redirect' : '/subscribe'}
        )

    # Test ajax read view returns 204 response (user premium)
    def test_ajax_read_view_premium(self):
        self.client.login(username='foo', password='bar')
        current_user = User.objects.get(username='foo')
        # Make user premium
        current_user.profile.is_premium = True
        current_user.profile.save()
        subscription = Subscription(plan="monthly", customer_id="cus_FMdSeBRaEVZlmh", user_id=current_user.id)
        subscription.save()
        page = self.client.post(reverse('read_view'), {
            'page': 1}, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertEqual(page.status_code, 204)
| 45.117486
| 170
| 0.63992
| 1,787
| 16,513
| 5.716844
| 0.088976
| 0.029366
| 0.022905
| 0.031617
| 0.811668
| 0.767326
| 0.759299
| 0.742267
| 0.733262
| 0.707322
| 0
| 0.006268
| 0.256101
| 16,513
| 366
| 171
| 45.117486
| 0.825383
| 0.096712
| 0
| 0.718631
| 0
| 0
| 0.099905
| 0.014195
| 0
| 0
| 0
| 0
| 0.106464
| 1
| 0.068441
| false
| 0.072243
| 0.038023
| 0
| 0.110266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
718b598da96e5e74661dfc7d3dd0fcdc78f8cff2
| 11,370
|
py
|
Python
|
ec2_compare/internal/instance_type/u_.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
ec2_compare/internal/instance_type/u_.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
ec2_compare/internal/instance_type/u_.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
# Automatically generated
# pylint: disable=all
get = [{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.1, 'DefaultVCpus': 224, 'DefaultCores': 224, 'DefaultThreadsPerCore': 1, 'ValidCores': [8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224], 'ValidThreadsPerCore': [1], 'SizeInMiB': 6291456, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required', 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': True, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'u-6tb1.56xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.1}, 'VCpuInfo': {'DefaultVCpus': 224, 'DefaultCores': 224, 'DefaultThreadsPerCore': 1, 'ValidCores': [8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 6291456}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 
'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': True}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.1, 'DefaultVCpus': 448, 'DefaultCores': 224, 'DefaultThreadsPerCore': 2, 'ValidCores': [8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 12582912, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required', 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': True, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'u-12tb1.112xlarge', 'CurrentGeneration': True, 
'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.1}, 'VCpuInfo': {'DefaultVCpus': 448, 'DefaultCores': 224, 'DefaultThreadsPerCore': 2, 'ValidCores': [8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 12582912}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': True}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.1, 'DefaultVCpus': 448, 'DefaultCores': 224, 'DefaultThreadsPerCore': 2, 'ValidCores': [8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 6291456, 'EbsOptimizedSupport': 'default', 
'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required', 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': True, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'u-6tb1.112xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.1}, 'VCpuInfo': {'DefaultVCpus': 448, 'DefaultCores': 224, 'DefaultThreadsPerCore': 2, 'ValidCores': [8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 6291456}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15}], 
'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': True}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.1, 'DefaultVCpus': 448, 'DefaultCores': 224, 'DefaultThreadsPerCore': 2, 'ValidCores': [8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 9437184, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required', 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': True, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'u-9tb1.112xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.1}, 'VCpuInfo': {'DefaultVCpus': 448, 'DefaultCores': 224, 'DefaultThreadsPerCore': 2, 'ValidCores': [8, 16, 
24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 9437184}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': True}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}] # noqa: E501
def get_instances_list() -> list:
    """Return the module-level list of EC2 instance-type descriptions.

    Presumably ``get`` is the literal list of ``u-*`` instance-type dicts
    defined earlier in this module (only its tail is visible here) —
    TODO confirm against the full file.
    """
    # pylint: disable=all
    return get
| 947.5
| 11,180
| 0.73518
| 979
| 11,370
| 8.52809
| 0.13381
| 0.009582
| 0.053659
| 0.099653
| 0.977961
| 0.977961
| 0.977961
| 0.971613
| 0.971613
| 0.971613
| 0
| 0.112114
| 0.083729
| 11,370
| 11
| 11,181
| 1,033.636364
| 0.689288
| 0.01117
| 0
| 0
| 1
| 0
| 0.619603
| 0.27989
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 12
|
719c6d701b55bd9f83018cbbed9ed50a59caf86f
| 881
|
py
|
Python
|
Curso-em-video/115-exerciciospython/d060_fatorial.py
|
FabianoBill/Estudos-em-python
|
32c3f9e37b83630c923ff7c0c77aa7d80fbc3174
|
[
"MIT"
] | 1
|
2021-05-24T19:44:04.000Z
|
2021-05-24T19:44:04.000Z
|
Curso-em-video/115-exerciciospython/d060_fatorial.py
|
FabianoBill/Estudos-em-python
|
32c3f9e37b83630c923ff7c0c77aa7d80fbc3174
|
[
"MIT"
] | null | null | null |
Curso-em-video/115-exerciciospython/d060_fatorial.py
|
FabianoBill/Estudos-em-python
|
32c3f9e37b83630c923ff7c0c77aa7d80fbc3174
|
[
"MIT"
] | null | null | null |
# Exercício Python 060: Faça um programa que leia um número qualquer e mostre o seu fatorial.


def _ler_numero_natural() -> int:
    """Prompt until the user supplies a non-negative integer and return it.

    The original version re-prompted only once, so a second negative
    entry slipped through; the ``while`` loop validates every attempt.
    """
    n = int(input("Digite um número natural para saber seu fatorial: "))
    while n < 0:
        print(f"{n} é negativo, não é um número natural, tente novamente.")
        n = int(input("Digite um número natural para saber seu fatorial: "))
    return n


def fatorial(n: int) -> int:
    """Return n! (with 0! == 1) for a non-negative integer ``n``."""
    f = 1
    for i in range(n, 0, -1):
        f *= i
    return f


def _mostrar_fatorial(n: int, f: int) -> None:
    """Print the expansion, e.g. ``5! = 5 x 4 x 3 x 2 x 1= 120``."""
    print(f"{n}! = ", end="")
    for i in range(n, 0, -1):
        print(f"{i}", end="")
        # " x " between factors, "= " after the last one (i == 1).
        print(" x " if i > 1 else "= ", end="")
    print(f)


if __name__ == "__main__":
    n = _ler_numero_natural()
    _mostrar_fatorial(n, fatorial(n))
| 31.464286
| 93
| 0.568672
| 148
| 881
| 3.385135
| 0.263514
| 0.035928
| 0.097804
| 0.11976
| 0.814371
| 0.810379
| 0.810379
| 0.810379
| 0.810379
| 0.810379
| 0
| 0.021212
| 0.250851
| 881
| 27
| 94
| 32.62963
| 0.737879
| 0.528944
| 0
| 0.166667
| 0
| 0
| 0.428928
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.416667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
e0cb9134c42f7b640d8b6183ff5a3c9c08c4803e
| 11,172
|
py
|
Python
|
tests/utils/test_type_util.py
|
pSpitzner/python-benedict
|
d55cfe99d394e3931326c0b712de49c169ae68da
|
[
"MIT"
] | 365
|
2019-05-21T05:50:30.000Z
|
2022-03-29T11:35:35.000Z
|
tests/utils/test_type_util.py
|
pSpitzner/python-benedict
|
d55cfe99d394e3931326c0b712de49c169ae68da
|
[
"MIT"
] | 78
|
2019-11-16T12:22:54.000Z
|
2022-03-14T12:21:30.000Z
|
tests/utils/test_type_util.py
|
pSpitzner/python-benedict
|
d55cfe99d394e3931326c0b712de49c169ae68da
|
[
"MIT"
] | 26
|
2019-12-16T06:34:12.000Z
|
2022-02-28T07:16:41.000Z
|
# -*- coding: utf-8 -*-
from benedict.utils import type_util
from datetime import datetime
from decimal import Decimal
import unittest
class type_util_test_case(unittest.TestCase):
    """Tests for the ``benedict.utils.type_util`` type predicates.

    Every predicate is exercised against the same catalogue of sample
    values; each test only declares which sample labels the predicate
    must accept — everything else must be rejected.
    """

    @staticmethod
    def _samples():
        # One representative value per supported "kind", keyed by label.
        return (
            ('none', None),
            ('bool_true', True),
            ('bool_false', False),
            ('int', int(0)),
            ('float', float(0.5)),
            ('decimal', Decimal(0.5)),
            ('datetime', datetime.now()),
            ('tuple', (0, 1, 2)),
            ('list', [0, 1, 2]),
            ('set', {0, 1, 2}),
            ('dict', {'a': 0, 'b': 1, 'c': 2}),
            ('string', 'hello world'),
            ('function', lambda a: a),
        )

    _ALL_LABELS = frozenset((
        'none', 'bool_true', 'bool_false', 'int', 'float', 'decimal',
        'datetime', 'tuple', 'list', 'set', 'dict', 'string', 'function',
    ))

    def _check(self, func, truthy):
        """Assert ``func`` accepts exactly the samples labelled in ``truthy``."""
        for label, value in self._samples():
            if label in truthy:
                self.assertTrue(func(value), label)
            else:
                self.assertFalse(func(value), label)

    def test_is_bool(self):
        self._check(type_util.is_bool, {'bool_true', 'bool_false'})

    def test_is_collection(self):
        self._check(type_util.is_collection, {'tuple', 'list', 'set', 'dict'})

    def test_is_datetime(self):
        self._check(type_util.is_datetime, {'datetime'})

    def test_is_decimal(self):
        self._check(type_util.is_decimal, {'decimal'})

    def test_is_dict(self):
        self._check(type_util.is_dict, {'dict'})

    def test_is_dict_or_list(self):
        self._check(type_util.is_dict_or_list, {'dict', 'list'})

    def test_is_dict_or_list_or_tuple(self):
        self._check(type_util.is_dict_or_list_or_tuple,
                    {'dict', 'list', 'tuple'})

    def test_is_float(self):
        self._check(type_util.is_float, {'float'})

    def test_is_function(self):
        self._check(type_util.is_function, {'function'})

    def test_is_integer(self):
        # bool is a subclass of int, so True/False count as integers here.
        self._check(type_util.is_integer, {'bool_true', 'bool_false', 'int'})

    def test_is_json_serializable(self):
        self._check(type_util.is_json_serializable, {
            'none', 'bool_true', 'bool_false', 'int', 'float',
            'tuple', 'list', 'dict', 'string',
        })

    def test_is_list(self):
        self._check(type_util.is_list, {'list'})

    def test_is_list_or_tuple(self):
        self._check(type_util.is_list_or_tuple, {'list', 'tuple'})

    def test_is_none(self):
        self._check(type_util.is_none, {'none'})

    def test_is_not_none(self):
        self._check(type_util.is_not_none, self._ALL_LABELS - {'none'})

    def test_is_set(self):
        self._check(type_util.is_set, {'set'})

    def test_is_string(self):
        self._check(type_util.is_string, {'string'})

    def test_is_tuple(self):
        self._check(type_util.is_tuple, {'tuple'})

    def test_is_uuid(self):
        # Both the 32-hex and the hyphenated canonical forms are valid.
        f = type_util.is_uuid
        for valid in ('ca761232ed4211cebacd00aa0057b223',
                      'CA761232-ED42-11CE-BACD-00AA0057B223'):
            self.assertTrue(f(valid), valid)
        for invalid in ('CA761232-ED42-11CE-BACD-00AA0057B22X',
                        'CA761232-ED42-11CE-BACD-00AA0057B22'):
            self.assertFalse(f(invalid), invalid)
| 36.272727
| 67
| 0.568475
| 1,586
| 11,172
| 3.941362
| 0.037831
| 0.453527
| 0.483763
| 0.06047
| 0.93745
| 0.906575
| 0.891857
| 0.880979
| 0.869621
| 0.853943
| 0
| 0.04741
| 0.244808
| 11,172
| 307
| 68
| 36.390879
| 0.693493
| 0.00188
| 0
| 0.836879
| 0
| 0
| 0.038299
| 0.015696
| 0
| 0
| 0
| 0
| 0.847518
| 1
| 0.067376
| false
| 0
| 0.014184
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e0d6e360cae4fbeb4d195d7b423c63bc67f830f2
| 13,253
|
py
|
Python
|
SBaaS_COBRA/stage02_physiology_pairWiseTest_postgresql_models.py
|
dmccloskey/SBaaS_COBRA
|
65635495c4fb7cc98f5c6629e495850e908ea858
|
[
"MIT"
] | null | null | null |
SBaaS_COBRA/stage02_physiology_pairWiseTest_postgresql_models.py
|
dmccloskey/SBaaS_COBRA
|
65635495c4fb7cc98f5c6629e495850e908ea858
|
[
"MIT"
] | null | null | null |
SBaaS_COBRA/stage02_physiology_pairWiseTest_postgresql_models.py
|
dmccloskey/SBaaS_COBRA
|
65635495c4fb7cc98f5c6629e495850e908ea858
|
[
"MIT"
] | null | null | null |
from SBaaS_base.postgresql_orm_base import *
class data_stage02_physiology_pairWiseTest(Base):
    """ORM model: pair-wise statistical test of simulated reaction fluxes.

    One row compares a single reaction (``rxn_id``) between two
    simulations (``simulation_id_1`` vs ``simulation_id_2``) within an
    analysis, storing the test statistic, raw and corrected p-values,
    confidence interval, and fold change.
    NOTE(review): ``Base``, ``Column`` etc. come from the star import of
    ``SBaaS_base.postgresql_orm_base`` — presumably SQLAlchemy
    declarative; confirm against that package.
    """
    __tablename__ = 'data_stage02_physiology_pairWiseTest'
    id = Column(Integer, Sequence('data_stage02_physiology_pairWiseTest_id_seq'), primary_key=True)
    analysis_id = Column(String(500))
    simulation_id_1 = Column(String(500))
    simulation_id_2 = Column(String(500))
    #simulation_dateAndTime = Column(DateTime);
    #experiment_id = Column(String(50))
    #model_id = Column(String(50))
    #sample_name_abbreviation = Column(String(100))
    rxn_id = Column(String(100))
    flux_units = Column(String(50), default = 'mmol*gDW-1*hr-1');
    test_stat = Column(Float)
    test_description = Column(String(50));
    pvalue = Column(Float)
    pvalue_corrected = Column(Float)
    pvalue_corrected_description = Column(String(500))
    mean = Column(Float)
    ci_lb = Column(Float)  # confidence-interval lower bound
    ci_ub = Column(Float)  # confidence-interval upper bound
    ci_level = Column(Float)
    fold_change = Column(Float)
    used_ = Column(Boolean);
    comment_ = Column(Text);
    # Each reaction may appear only once per (analysis, simulation pair).
    __table_args__ = (
        UniqueConstraint('analysis_id','simulation_id_1','simulation_id_2','rxn_id'),
        )
    def __init__(self,
                 row_dict_I,
                 ):
        """Populate every mapped column (except ``id``) from ``row_dict_I``.

        Raises ``KeyError`` if any expected key is missing.
        """
        self.analysis_id=row_dict_I['analysis_id'];
        self.pvalue_corrected=row_dict_I['pvalue_corrected'];
        self.comment_=row_dict_I['comment_'];
        self.used_=row_dict_I['used_'];
        self.fold_change=row_dict_I['fold_change'];
        self.ci_level=row_dict_I['ci_level'];
        self.ci_ub=row_dict_I['ci_ub'];
        self.ci_lb=row_dict_I['ci_lb'];
        self.mean=row_dict_I['mean'];
        self.pvalue_corrected_description=row_dict_I['pvalue_corrected_description'];
        self.pvalue=row_dict_I['pvalue'];
        self.test_description=row_dict_I['test_description'];
        self.test_stat=row_dict_I['test_stat'];
        self.flux_units=row_dict_I['flux_units'];
        self.rxn_id=row_dict_I['rxn_id'];
        self.simulation_id_2=row_dict_I['simulation_id_2'];
        self.simulation_id_1=row_dict_I['simulation_id_1'];
    def __set__row__(self,
                     analysis_id_I,simulation_id_1_I,simulation_id_2_I,
                     #simulation_dateAndTime_I,
                     #experiment_id_I,model_id_I,
                     # sample_name_abbreviation_I,
                     rxn_id_I,flux_units_I,
                     mean_I, test_stat_I, test_description_I,
                     pvalue_I, pvalue_corrected_I, pvalue_corrected_description_I,
                     ci_lb_I, ci_ub_I, ci_level_I,
                     fold_change_I,
                     used__I,comment__I):
        """Positional setter mirroring ``__init__`` (``_I`` suffix = input arg)."""
        self.analysis_id=analysis_id_I
        self.simulation_id_1=simulation_id_1_I
        self.simulation_id_2=simulation_id_2_I
        #self.simulation_dateAndTime=simulation_dateAndTime_I
        #self.experiment_id=experiment_id_I
        #self.model_id=model_id_I
        #self.sample_name_abbreviation=sample_name_abbreviation_I
        self.rxn_id=rxn_id_I
        self.flux_units=flux_units_I
        self.mean=mean_I;
        self.test_stat=test_stat_I;
        self.test_description=test_description_I;
        self.pvalue=pvalue_I;
        self.pvalue_corrected=pvalue_corrected_I;
        self.pvalue_corrected_description=pvalue_corrected_description_I;
        self.ci_lb=ci_lb_I;
        self.ci_ub=ci_ub_I;
        self.ci_level=ci_level_I;
        self.fold_change=fold_change_I;
        self.used_=used__I
        self.comment_=comment__I
    def __repr__dict__(self):
        """Return the row as a plain dict (includes the ``id`` key)."""
        return {'id':self.id,
                'analysis_id':self.analysis_id,
                'pvalue_corrected':self.pvalue_corrected,
                'comment_':self.comment_,
                'used_':self.used_,
                'fold_change':self.fold_change,
                'ci_level':self.ci_level,
                'ci_ub':self.ci_ub,
                'ci_lb':self.ci_lb,
                'mean':self.mean,
                'pvalue_corrected_description':self.pvalue_corrected_description,
                'pvalue':self.pvalue,
                'test_description':self.test_description,
                'test_stat':self.test_stat,
                'flux_units':self.flux_units,
                'rxn_id':self.rxn_id,
                'simulation_id_2':self.simulation_id_2,
                'simulation_id_1':self.simulation_id_1,
                }
    def __repr__json__(self):
        """Return the row serialized as a JSON string.

        NOTE(review): ``json`` is presumably re-exported by the star
        import above — confirm; it is not imported here directly.
        """
        return json.dumps(self.__repr__dict__())
class data_stage02_physiology_pairWiseTestMetabolites(Base):
    """ORM model: pair-wise statistical test aggregated per metabolite.

    Same schema as ``data_stage02_physiology_pairWiseTest`` but keyed on
    ``met_id`` instead of ``rxn_id``.
    NOTE(review): ``Base``, ``Column`` etc. come from the star import of
    ``SBaaS_base.postgresql_orm_base`` — presumably SQLAlchemy
    declarative; confirm against that package.
    """
    __tablename__ = 'data_stage02_physiology_pairWiseTestMetabolites'
    id = Column(Integer, Sequence('data_stage02_physiology_pairWiseTestMetabolites_id_seq'), primary_key=True)
    analysis_id = Column(String(500))
    simulation_id_1 = Column(String(500))
    simulation_id_2 = Column(String(500))
    #simulation_dateAndTime = Column(DateTime);
    #experiment_id = Column(String(50))
    #model_id = Column(String(50))
    #sample_name_abbreviation = Column(String(100))
    met_id = Column(String(100))
    flux_units = Column(String(50), default = 'mmol*gDW-1*hr-1');
    test_stat = Column(Float)
    test_description = Column(String(50));
    pvalue = Column(Float)
    pvalue_corrected = Column(Float)
    pvalue_corrected_description = Column(String(500))
    mean = Column(Float)
    ci_lb = Column(Float)  # confidence-interval lower bound
    ci_ub = Column(Float)  # confidence-interval upper bound
    ci_level = Column(Float)
    fold_change = Column(Float)
    used_ = Column(Boolean);
    comment_ = Column(Text);
    # Each metabolite may appear only once per (analysis, simulation pair).
    __table_args__ = (
        UniqueConstraint('analysis_id','simulation_id_1','simulation_id_2','met_id'),
        )
    def __init__(self,
                 row_dict_I,
                 ):
        """Populate every mapped column (except ``id``) from ``row_dict_I``.

        Raises ``KeyError`` if any expected key is missing.
        """
        self.analysis_id=row_dict_I['analysis_id'];
        self.pvalue_corrected_description=row_dict_I['pvalue_corrected_description'];
        self.used_=row_dict_I['used_'];
        self.comment_=row_dict_I['comment_'];
        self.test_description=row_dict_I['test_description'];
        self.test_stat=row_dict_I['test_stat'];
        self.flux_units=row_dict_I['flux_units'];
        self.met_id=row_dict_I['met_id'];
        self.simulation_id_2=row_dict_I['simulation_id_2'];
        self.simulation_id_1=row_dict_I['simulation_id_1'];
        self.pvalue=row_dict_I['pvalue'];
        self.pvalue_corrected=row_dict_I['pvalue_corrected'];
        self.fold_change=row_dict_I['fold_change'];
        self.ci_level=row_dict_I['ci_level'];
        self.ci_ub=row_dict_I['ci_ub'];
        self.ci_lb=row_dict_I['ci_lb'];
        self.mean=row_dict_I['mean'];
    def __set__row__(self,
                     analysis_id_I,simulation_id_1_I,simulation_id_2_I,
                     #simulation_dateAndTime_I,
                     #experiment_id_I,model_id_I,
                     # sample_name_abbreviation_I,
                     met_id_I,flux_units_I,
                     mean_I, test_stat_I, test_description_I,
                     pvalue_I, pvalue_corrected_I, pvalue_corrected_description_I,
                     ci_lb_I, ci_ub_I, ci_level_I,
                     fold_change_I,
                     used__I,comment__I):
        """Positional setter mirroring ``__init__`` (``_I`` suffix = input arg)."""
        self.analysis_id=analysis_id_I
        self.simulation_id_1=simulation_id_1_I
        self.simulation_id_2=simulation_id_2_I
        #self.simulation_dateAndTime=simulation_dateAndTime_I
        #self.experiment_id=experiment_id_I
        #self.model_id=model_id_I
        #self.sample_name_abbreviation=sample_name_abbreviation_I
        self.met_id=met_id_I
        self.flux_units=flux_units_I
        self.mean=mean_I;
        self.test_stat=test_stat_I;
        self.test_description=test_description_I;
        self.pvalue=pvalue_I;
        self.pvalue_corrected=pvalue_corrected_I;
        self.pvalue_corrected_description=pvalue_corrected_description_I;
        self.ci_lb=ci_lb_I;
        self.ci_ub=ci_ub_I;
        self.ci_level=ci_level_I;
        self.fold_change=fold_change_I;
        self.used_=used__I
        self.comment_=comment__I
    def __repr__dict__(self):
        """Return the row as a plain dict (includes the ``id`` key)."""
        return {'id':self.id,
                'analysis_id':self.analysis_id,
                'pvalue_corrected':self.pvalue_corrected,
                'comment_':self.comment_,
                'used_':self.used_,
                'fold_change':self.fold_change,
                'ci_level':self.ci_level,
                'ci_ub':self.ci_ub,
                'ci_lb':self.ci_lb,
                'mean':self.mean,
                'pvalue_corrected_description':self.pvalue_corrected_description,
                'pvalue':self.pvalue,
                'test_description':self.test_description,
                'test_stat':self.test_stat,
                'flux_units':self.flux_units,
                'met_id':self.met_id,
                'simulation_id_2':self.simulation_id_2,
                'simulation_id_1':self.simulation_id_1,}
    def __repr__json__(self):
        """Return the row serialized as a JSON string.

        NOTE(review): ``json`` is presumably re-exported by the star
        import above — confirm; it is not imported here directly.
        """
        return json.dumps(self.__repr__dict__())
class data_stage02_physiology_pairWiseTestSubsystems(Base):
    """ORM model: pair-wise statistical test aggregated per subsystem.

    Same schema as ``data_stage02_physiology_pairWiseTest`` but keyed on
    ``subsystem_id`` instead of ``rxn_id``.
    NOTE(review): ``Base``, ``Column`` etc. come from the star import of
    ``SBaaS_base.postgresql_orm_base`` — presumably SQLAlchemy
    declarative; confirm against that package.
    """
    __tablename__ = 'data_stage02_physiology_pairWiseTestSubsystems'
    id = Column(Integer, Sequence('data_stage02_physiology_pairWiseTestSubsystems_id_seq'), primary_key=True)
    analysis_id = Column(String(500))
    simulation_id_1 = Column(String(500))
    simulation_id_2 = Column(String(500))
    #simulation_dateAndTime = Column(DateTime);
    #experiment_id = Column(String(50))
    #model_id = Column(String(50))
    #sample_name_abbreviation = Column(String(100))
    subsystem_id = Column(String(100))
    flux_units = Column(String(50), default = 'mmol*gDW-1*hr-1');
    test_stat = Column(Float)
    test_description = Column(String(50));
    pvalue = Column(Float)
    pvalue_corrected = Column(Float)
    pvalue_corrected_description = Column(String(500))
    mean = Column(Float)
    ci_lb = Column(Float)  # confidence-interval lower bound
    ci_ub = Column(Float)  # confidence-interval upper bound
    ci_level = Column(Float)
    fold_change = Column(Float)
    used_ = Column(Boolean);
    comment_ = Column(Text);
    # Each subsystem may appear only once per (analysis, simulation pair).
    __table_args__ = (
        UniqueConstraint('analysis_id','simulation_id_1','simulation_id_2','subsystem_id'),
        )
    def __init__(self,
                 row_dict_I,
                 ):
        """Populate every mapped column (except ``id``) from ``row_dict_I``.

        Raises ``KeyError`` if any expected key is missing.
        """
        self.analysis_id=row_dict_I['analysis_id'];
        self.mean=row_dict_I['mean'];
        self.simulation_id_1=row_dict_I['simulation_id_1'];
        self.simulation_id_2=row_dict_I['simulation_id_2'];
        self.subsystem_id=row_dict_I['subsystem_id'];
        self.flux_units=row_dict_I['flux_units'];
        self.test_stat=row_dict_I['test_stat'];
        self.test_description=row_dict_I['test_description'];
        self.pvalue=row_dict_I['pvalue'];
        self.pvalue_corrected=row_dict_I['pvalue_corrected'];
        self.pvalue_corrected_description=row_dict_I['pvalue_corrected_description'];
        self.ci_lb=row_dict_I['ci_lb'];
        self.ci_ub=row_dict_I['ci_ub'];
        self.ci_level=row_dict_I['ci_level'];
        self.fold_change=row_dict_I['fold_change'];
        self.used_=row_dict_I['used_'];
        self.comment_=row_dict_I['comment_'];
    def __set__row__(self,
                     analysis_id_I,simulation_id_1_I,simulation_id_2_I,
                     #simulation_dateAndTime_I,
                     #experiment_id_I,model_id_I,
                     # sample_name_abbreviation_I,
                     subsystem_id_I,flux_units_I,
                     mean_I, test_stat_I, test_description_I,
                     pvalue_I, pvalue_corrected_I, pvalue_corrected_description_I,
                     ci_lb_I, ci_ub_I, ci_level_I,
                     fold_change_I,
                     used__I,comment__I):
        """Positional setter mirroring ``__init__`` (``_I`` suffix = input arg)."""
        self.analysis_id=analysis_id_I
        self.simulation_id_1=simulation_id_1_I
        self.simulation_id_2=simulation_id_2_I
        #self.simulation_dateAndTime=simulation_dateAndTime_I
        #self.experiment_id=experiment_id_I
        #self.model_id=model_id_I
        #self.sample_name_abbreviation=sample_name_abbreviation_I
        self.subsystem_id=subsystem_id_I
        self.flux_units=flux_units_I
        self.mean=mean_I;
        self.test_stat=test_stat_I;
        self.test_description=test_description_I;
        self.pvalue=pvalue_I;
        self.pvalue_corrected=pvalue_corrected_I;
        self.pvalue_corrected_description=pvalue_corrected_description_I;
        self.ci_lb=ci_lb_I;
        self.ci_ub=ci_ub_I;
        self.ci_level=ci_level_I;
        self.fold_change=fold_change_I;
        self.used_=used__I
        self.comment_=comment__I
    def __repr__dict__(self):
        """Return the row as a plain dict (includes the ``id`` key)."""
        return {'id':self.id,
                'analysis_id':self.analysis_id,
                'pvalue_corrected':self.pvalue_corrected,
                'comment_':self.comment_,
                'used_':self.used_,
                'fold_change':self.fold_change,
                'ci_level':self.ci_level,
                'ci_ub':self.ci_ub,
                'ci_lb':self.ci_lb,
                'mean':self.mean,
                'pvalue_corrected_description':self.pvalue_corrected_description,
                'pvalue':self.pvalue,
                'test_description':self.test_description,
                'test_stat':self.test_stat,
                'flux_units':self.flux_units,
                'subsystem_id':self.subsystem_id,
                'simulation_id_2':self.simulation_id_2,
                'simulation_id_1':self.simulation_id_1,}
    def __repr__json__(self):
        """Return the row serialized as a JSON string.

        NOTE(review): ``json`` is presumably re-exported by the star
        import above — confirm; it is not imported here directly.
        """
        return json.dumps(self.__repr__dict__())
| 41.286604
| 110
| 0.654644
| 1,720
| 13,253
| 4.524419
| 0.044767
| 0.042406
| 0.055513
| 0.028913
| 0.960164
| 0.932408
| 0.932408
| 0.907993
| 0.905423
| 0.880365
| 0
| 0.015584
| 0.244699
| 13,253
| 320
| 111
| 41.415625
| 0.761838
| 0.090546
| 0
| 0.877323
| 0
| 0
| 0.129545
| 0.037191
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04461
| false
| 0
| 0.003717
| 0.022305
| 0.304833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0d9f52967efd1175c00ac0cb93c57f9d0b93d82
| 32,763
|
py
|
Python
|
qiime2/core/type/tests/test_util.py
|
misialq/qiime2
|
6d8932eda130d4a9356f977fece2e252c135d0b9
|
[
"BSD-3-Clause"
] | 1
|
2021-07-09T08:58:59.000Z
|
2021-07-09T08:58:59.000Z
|
qiime2/core/type/tests/test_util.py
|
misialq/qiime2
|
6d8932eda130d4a9356f977fece2e252c135d0b9
|
[
"BSD-3-Clause"
] | null | null | null |
qiime2/core/type/tests/test_util.py
|
misialq/qiime2
|
6d8932eda130d4a9356f977fece2e252c135d0b9
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from qiime2.core.type import (
parse_primitive, Int, Float, Bool, Str, List, Set, Metadata,
MetadataColumn)
class TestParsePrimitiveNonCollectionsSimple(unittest.TestCase):
    """parse_primitive on scalar type expressions with string inputs."""

    def _assert_parsed(self, expr, raw, expected, expected_type):
        # Successful parse: exact value and concrete Python type.
        obs = parse_primitive(expr, raw)
        self.assertEqual(obs, expected)
        self.assertIsInstance(obs, expected_type)

    def _assert_not_coercible(self, expr, raw):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(expr, raw)

    def test_metadata_expr(self):
        with self.assertRaisesRegex(ValueError, 'Metadata may not be parsed'):
            parse_primitive(Metadata, '42')

    def test_metadata_column_expr(self):
        with self.assertRaisesRegex(ValueError,
                                    'MetadataColumn.* may not be parsed'):
            parse_primitive(MetadataColumn, '42')

    def test_int_type_int_value(self):
        self._assert_parsed(Int, '42', 42, int)

    def test_float_type_int_value(self):
        self._assert_parsed(Float, '42', 42.0, float)

    def test_bool_type_int_value(self):
        self._assert_not_coercible(Bool, '42')

    def test_str_type_int_value(self):
        self._assert_parsed(Str, '42', '42', str)

    def test_int_type_float_value(self):
        self._assert_not_coercible(Int, '42.0')

    def test_float_type_float_value(self):
        self._assert_parsed(Float, '42.0', 42.0, float)

    def test_bool_type_float_value(self):
        self._assert_not_coercible(Bool, '42.0')

    def test_str_type_float_value(self):
        self._assert_parsed(Str, '42.0', '42.0', str)

    def test_int_type_bool_value(self):
        self._assert_not_coercible(Int, 'True')

    def test_float_type_bool_value(self):
        self._assert_not_coercible(Float, 'True')

    def test_bool_type_bool_value(self):
        self._assert_parsed(Bool, 'True', True, bool)

    def test_str_type_bool_value(self):
        self._assert_parsed(Str, 'True', 'True', str)

    def test_int_type_str_value(self):
        self._assert_not_coercible(Int, 'peanut')

    def test_float_type_str_value(self):
        self._assert_not_coercible(Float, 'peanut')

    def test_bool_type_str_value(self):
        self._assert_not_coercible(Bool, 'peanut')

    def test_str_type_str_value(self):
        self._assert_parsed(Str, 'peanut', 'peanut', str)
class TestParsePrimitiveNonCollectionNonStringInputs(unittest.TestCase):
    """parse_primitive when the value is already a Python scalar."""

    def _assert_passthrough(self, expr, value, expected_type):
        # A matching scalar should come back equal and correctly typed.
        obs = parse_primitive(expr, value)
        self.assertEqual(obs, value)
        self.assertIsInstance(obs, expected_type)

    def test_int_type_int_value(self):
        self._assert_passthrough(Int, 1, int)

    def test_float_type_float_value(self):
        self._assert_passthrough(Float, 3.3, float)

    def test_bool_type_bool_value(self):
        self._assert_passthrough(Bool, True, bool)

    def test_str_type_str_value(self):
        self._assert_passthrough(Str, 'peanut', str)

    def test_int_type_bool_value(self):
        # A bool is not accepted where an Int is required.
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(Int, True)
class TestParsePrimitiveNonCollectionsSimpleUnions(unittest.TestCase):
    """parse_primitive on unions of scalar types."""

    def setUp(self):
        super().setUp()
        # Every simple union except Int | Float, which collapses to
        # Float and gets its own dedicated tests below.
        self.exprs = [
            Int | Bool,
            Int | Str,
            Float | Bool,
            Float | Str,
            Bool | Str,
        ]

    def _assert_union_parses(self, raw, expected, expected_type):
        for expr in self.exprs:
            with self.subTest(expr=expr):
                obs = parse_primitive(expr, raw)
                self.assertEqual(obs, expected)
                self.assertIsInstance(obs, expected_type)

    def test_int_union_float_expr_int_value(self):
        # Int | Float == Float
        obs = parse_primitive(Int | Float, '42')
        self.assertEqual(obs, 42.0)
        self.assertIsInstance(obs, float)

    def test_int_union_float_expr_float_value(self):
        # Int | Float == Float
        obs = parse_primitive(Int | Float, '42.0')
        self.assertEqual(obs, 42.0)
        self.assertIsInstance(obs, float)

    def test_int_union_float_expr_bool_value(self):
        # Int | Float == Float
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(Int | Float, 'True')

    def test_int_union_float_expr_str_value(self):
        # Int | Float == Float
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(Int | Float, 'peanut')

    def test_simple_unions_with_int_value(self):
        self._assert_union_parses('42', 42, int)

    def test_simple_unions_with_float_value(self):
        self._assert_union_parses('42.1', 42.1, float)

    def test_simple_unions_with_bool_value(self):
        self._assert_union_parses('True', True, bool)

    def test_simple_unions_with_str_value(self):
        self._assert_union_parses('peanut', 'peanut', str)
class TestParsePrimitiveCollectionsSimple(unittest.TestCase):
    """parse_primitive on List[...] and Set[...] collection expressions."""

    def _assert_list(self, expr, raw, expected, element_type):
        obs = parse_primitive(expr, raw)
        self.assertEqual(obs, expected)
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], element_type)

    def _assert_set(self, expr, raw, expected, element_type):
        obs = parse_primitive(expr, raw)
        self.assertEqual(obs, expected)
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), element_type)

    def _assert_not_coercible(self, expr, raw):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(expr, raw)

    def test_list_of_int(self):
        self._assert_list(List[Int], ('1', '2', '3'), [1, 2, 3], int)

    def test_list_of_int_bad_value_variant_a(self):
        self._assert_not_coercible(List[Int], ('True', '2', '3'))

    def test_list_of_int_bad_value_variant_b(self):
        self._assert_not_coercible(List[Int], ('1', '2', 'False'))

    def test_set_of_int(self):
        self._assert_set(Set[Int], ('1', '2', '3'), {1, 2, 3}, int)

    def test_list_of_float(self):
        self._assert_list(List[Float], ('1.0', '2.0', '3.0'),
                          [1.0, 2.0, 3.0], float)

    def test_set_of_float(self):
        self._assert_set(Set[Float], ('1.0', '2.0', '3.0'),
                         {1.0, 2.0, 3.0}, float)

    def test_list_of_bool(self):
        self._assert_list(List[Bool], ('True', 'False', 'True'),
                          [True, False, True], bool)

    def test_set_of_bool(self):
        self._assert_set(Set[Bool], ('True', 'False'), {True, False}, bool)

    def test_list_of_str(self):
        self._assert_list(List[Str], ('peanut', 'the', 'dog'),
                          ['peanut', 'the', 'dog'], str)

    def test_set_of_str(self):
        self._assert_set(Set[Str], ('peanut', 'the', 'dog'),
                         {'peanut', 'the', 'dog'}, str)

    # The next tests _aren't_ monomorphic, because unions of Int and Float
    # always yield a Float (List[Int] | List[Float] == List[Float]).
    def test_list_int_or_float_with_int_value(self):
        self._assert_list(List[Int] | List[Float], ('1', '2', '3'),
                          [1.0, 2.0, 3.0], float)

    def test_set_int_or_float_with_int_value(self):
        self._assert_set(Set[Int] | Set[Float], ('1', '2', '3'),
                         {1.0, 2.0, 3.0}, float)

    def test_list_int_or_float_with_float_value(self):
        self._assert_list(List[Int] | List[Float], ('1.1', '2.2', '3.3'),
                          [1.1, 2.2, 3.3], float)

    def test_set_int_or_float_with_float_value(self):
        self._assert_set(Set[Int] | Set[Float], ('1.1', '2.2', '3.3'),
                         {1.1, 2.2, 3.3}, float)

    def test_list_int_or_float_int_value(self):
        self._assert_list(List[Int | Float], ('1', '2', '3'),
                          [1.0, 2.0, 3.0], float)

    def test_set_int_or_float_int_value(self):
        self._assert_set(Set[Int | Float], ('1', '2', '3'),
                         {1.0, 2.0, 3.0}, float)
class TestParsePrimitiveCollectionsMonomorphic(unittest.TestCase):
    """Unions of whole collections (List[X] | List[Y]).

    The parsed result must be *monomorphic*: every element coerces to the
    same member type.  Mixed inputs either fail (e.g. Int|Bool) or fall
    back entirely to Str when Str is a member of the union.
    """

    def test_list_int_or_bool_with_int_value(self):
        obs = parse_primitive(List[Int] | List[Bool], ('1', '2', '3'))
        self.assertEqual(obs, [1, 2, 3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], int)

    def test_list_int_or_bool_with_bool_value(self):
        obs = parse_primitive(List[Int] | List[Bool],
                              ('True', 'False', 'True'))
        self.assertEqual(obs, [True, False, True])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], bool)

    # With no Str member to absorb mismatches, mixing ints and bools in a
    # single collection is an error regardless of position.
    def test_list_int_or_bool_with_mixed_value_variant_a(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int] | List[Bool], ('True', '2', '3'))

    def test_list_int_or_bool_with_mixed_value_variant_b(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int] | List[Bool], ('1', '2', 'True'))

    def test_list_int_or_bool_with_mixed_value_variant_c(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int] | List[Bool], ('False', '2', 'True'))

    def test_set_int_or_bool_with_int_value(self):
        obs = parse_primitive(Set[Int] | Set[Bool], ('1', '2', '3'))
        self.assertEqual(obs, {1, 2, 3})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), int)

    def test_set_int_or_bool_with_bool_value(self):
        obs = parse_primitive(Set[Int] | Set[Bool], ('True', 'False'))
        self.assertEqual(obs, {True, False})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), bool)

    def test_list_int_or_str_with_int_value(self):
        obs = parse_primitive(List[Int] | List[Str], ('1', '2', '3'))
        self.assertEqual(obs, [1, 2, 3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], int)

    def test_list_int_or_str_with_str_value(self):
        obs = parse_primitive(List[Int] | List[Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, ['peanut', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)

    def test_list_int_or_str_with_mixed_value_variant_a(self):
        # When Str is in the union, a mixed input degrades the *whole*
        # collection to strings ('1' stays '1').
        obs = parse_primitive(List[Int] | List[Str], ('1', 'the', 'dog'))
        self.assertEqual(obs, ['1', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)
        self.assertIsInstance(obs[1], str)

    def test_list_int_or_str_with_mixed_value_variant_b(self):
        obs = parse_primitive(List[Int] | List[Str], ('peanut', 'the', '1'))
        self.assertEqual(obs, ['peanut', 'the', '1'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)
        self.assertIsInstance(obs[2], str)

    def test_set_int_or_str_with_int_value(self):
        obs = parse_primitive(Set[Int] | Set[Str], ('1', '2', '3'))
        self.assertEqual(obs, {1, 2, 3})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), int)

    def test_set_int_or_str_with_str_value(self):
        obs = parse_primitive(Set[Int] | Set[Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, {'peanut', 'the', 'dog'})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), str)

    def test_list_float_or_bool_with_float_value(self):
        obs = parse_primitive(List[Float] | List[Bool], ('1.1', '2.2', '3.3'))
        self.assertEqual(obs, [1.1, 2.2, 3.3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], float)

    def test_list_float_or_bool_with_bool_value(self):
        obs = parse_primitive(List[Float] | List[Bool],
                              ('True', 'False', 'True'))
        self.assertEqual(obs, [True, False, True])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], bool)

    def test_list_float_or_bool_with_mixed_value_variant_a(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Float] | List[Bool],
                            ('1.1', 'False', 'True'))

    def test_list_float_or_bool_with_mixed_value_variant_b(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Float] | List[Bool],
                            ('True', 'False', '3.3'))

    def test_set_float_or_bool_with_float_value(self):
        obs = parse_primitive(Set[Float] | Set[Bool], ('1.1', '2.2', '3.3'))
        self.assertEqual(obs, {1.1, 2.2, 3.3})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), float)

    def test_set_float_or_bool_with_bool_value(self):
        obs = parse_primitive(Set[Float] | Set[Bool],
                              ('True', 'False', 'True'))
        self.assertEqual(obs, {True, False})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), bool)

    def test_list_float_or_str_with_float_value(self):
        obs = parse_primitive(List[Float] | List[Str], ('1.1', '2.2', '3.3'))
        self.assertEqual(obs, [1.1, 2.2, 3.3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], float)

    def test_list_float_or_str_with_str_value(self):
        obs = parse_primitive(List[Float] | List[Str],
                              ('peanut', 'the', 'dog'))
        self.assertEqual(obs, ['peanut', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)

    def test_list_float_or_str_with_mixed_value_variant_a(self):
        obs = parse_primitive(List[Float] | List[Str],
                              ('1.1', 'the', 'dog'))
        self.assertEqual(obs, ['1.1', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)

    def test_list_float_or_str_with_mixed_value_variant_b(self):
        obs = parse_primitive(List[Float] | List[Str],
                              ('peanut', 'the', '3.3'))
        self.assertEqual(obs, ['peanut', 'the', '3.3'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[-1], str)

    def test_set_float_or_str_with_float_value(self):
        obs = parse_primitive(Set[Float] | Set[Str], ('1.1', '2.2', '3.3'))
        self.assertEqual(obs, {1.1, 2.2, 3.3})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), float)

    def test_set_float_or_str_with_str_value(self):
        obs = parse_primitive(Set[Float] | Set[Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, {'peanut', 'the', 'dog'})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), str)

    def test_list_bool_or_str_with_bool_value(self):
        obs = parse_primitive(List[Bool] | List[Str],
                              ('True', 'False', 'True'))
        self.assertEqual(obs, [True, False, True])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], bool)

    def test_list_bool_or_str_with_str_value(self):
        obs = parse_primitive(List[Bool] | List[Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, ['peanut', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)

    def test_list_bool_or_str_with_mixed_value_variant_a(self):
        obs = parse_primitive(List[Bool] | List[Str], ('True', 'the', 'dog'))
        self.assertEqual(obs, ['True', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)

    def test_list_bool_or_str_with_mixed_value_variant_b(self):
        obs = parse_primitive(List[Bool] | List[Str],
                              ('peanut', 'the', 'True'))
        self.assertEqual(obs, ['peanut', 'the', 'True'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[-1], str)

    def test_set_bool_or_str_with_bool_value(self):
        obs = parse_primitive(Set[Bool] | Set[Str],
                              ('True', 'False', 'True'))
        self.assertEqual(obs, {True, False})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), bool)

    def test_set_bool_or_str_with_str_value(self):
        obs = parse_primitive(Set[Bool] | Set[Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, {'peanut', 'the', 'dog'})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), str)

    # NOTE(review): this test is an exact duplicate of
    # test_list_bool_or_str_with_mixed_value_variant_b above (plus one extra
    # isinstance check) — consider removing one of them.
    def test_list_bool_or_str_with_mixed_value(self):
        obs = parse_primitive(List[Bool] | List[Str],
                              ('peanut', 'the', 'True'))
        self.assertEqual(obs, ['peanut', 'the', 'True'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)
        self.assertIsInstance(obs[-1], str)
class TestParsePrimitiveCollectionsComposite(unittest.TestCase):
    """Unions of member types *inside* one collection (List[X | Y]).

    Unlike the monomorphic cases, a composite collection may hold elements
    of different member types side by side (e.g. [1, False, 2, True]);
    only tokens matching none of the members raise.
    """

    def test_list_int_or_bool_with_int_value(self):
        obs = parse_primitive(List[Int | Bool], ('1', '2', '3'))
        self.assertEqual(obs, [1, 2, 3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], int)

    def test_list_int_or_bool_with_float_value(self):
        # Floats are not a member of Int | Bool, so they cannot be coerced.
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int | Bool], ('1.1', '2.2', '3.3'))

    def test_list_int_or_bool_with_bool_value(self):
        obs = parse_primitive(List[Int | Bool], ('True', 'False', 'True'))
        self.assertEqual(obs, [True, False, True])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], bool)

    def test_list_int_or_bool_with_str_value(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int | Bool], ('peanut', 'the', 'dog'))

    def test_list_int_or_bool_with_mixed_value(self):
        # Heterogeneous elements are kept per-token, not unified.
        obs = parse_primitive(List[Int | Bool], ('1', 'False', '2', 'True'))
        self.assertEqual(obs, [1, False, 2, True])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], int)
        self.assertIsInstance(obs[1], bool)

    def test_list_int_or_bool_with_mixed_value_variant_a(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int | Bool], ('peanut', 'False', '2', 'True'))

    def test_list_int_or_bool_with_mixed_value_variant_b(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int | Bool], ('1', 'False', '2', 'peanut'))

    def test_list_int_or_bool_with_bad_mix_value(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int | Bool], ('1', 'True', 'dog'))

    def test_set_int_or_bool_with_int_value(self):
        obs = parse_primitive(Set[Int | Bool], ('1', '2', '3'))
        self.assertEqual(obs, {1, 2, 3})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), int)

    def test_set_int_or_bool_with_bool_value(self):
        obs = parse_primitive(Set[Int | Bool], ('True', 'False', 'True'))
        self.assertEqual(obs, {True, False})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), bool)

    def test_set_int_or_bool_with_mixed_value(self):
        obs = parse_primitive(Set[Int | Bool], ('1', 'False', '2', 'True'))
        self.assertEqual(obs, {1, False, 2, True})
        self.assertIsInstance(obs, set)
        # NOTE(review): these pop() type checks assume a stable iteration
        # order for the resulting set — verify this is deterministic.
        self.assertIsInstance(obs.pop(), bool)
        self.assertIsInstance(obs.pop(), int)

    def test_list_int_or_str_with_int_value(self):
        obs = parse_primitive(List[Int | Str], ('1', '2', '3'))
        self.assertEqual(obs, [1, 2, 3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], int)

    def test_list_int_or_str_with_str_value(self):
        obs = parse_primitive(List[Int | Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, ['peanut', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)

    def test_list_int_or_str_with_mixed_value_variant_a(self):
        # Contrast with the monomorphic case: '1' stays an int here.
        obs = parse_primitive(List[Int | Str], ('1', 'the', 'dog'))
        self.assertEqual(obs, [1, 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], int)
        self.assertIsInstance(obs[1], str)

    def test_list_int_or_str_with_mixed_value_variant_b(self):
        obs = parse_primitive(List[Int | Str], ('peanut', 'the', '1'))
        self.assertEqual(obs, ['peanut', 'the', 1])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)
        self.assertIsInstance(obs[2], int)

    def test_set_int_or_str_with_int_value(self):
        obs = parse_primitive(Set[Int | Str], ('1', '2', '3'))
        self.assertEqual(obs, {1, 2, 3})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), int)

    def test_set_int_or_str_with_str_value(self):
        obs = parse_primitive(Set[Int | Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, {'peanut', 'the', 'dog'})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), str)

    def test_set_int_or_str_with_mixed_value(self):
        obs = parse_primitive(Set[Int | Str], ('1', 'the', '2', 'dog'))
        self.assertEqual(obs, {1, 'the', 2, 'dog'})
        self.assertIsInstance(obs, set)

    def test_list_float_or_bool_with_float_value(self):
        obs = parse_primitive(List[Float | Bool], ('1.1', '2.2', '3.3'))
        self.assertEqual(obs, [1.1, 2.2, 3.3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], float)

    def test_list_float_or_bool_with_bool_value(self):
        obs = parse_primitive(List[Float | Bool], ('True', 'False', 'True'))
        self.assertEqual(obs, [True, False, True])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], bool)

    def test_list_float_or_bool_with_mixed_value_variant_a(self):
        obs = parse_primitive(List[Float | Bool], ('True', '2.2', '3.3'))
        self.assertEqual(obs, [True, 2.2, 3.3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], bool)
        self.assertIsInstance(obs[1], float)

    def test_list_float_or_bool_with_mixed_value_variant_b(self):
        obs = parse_primitive(List[Float | Bool], ('1.1', '2.2', 'False'))
        self.assertEqual(obs, [1.1, 2.2, False])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], float)
        self.assertIsInstance(obs[-1], bool)

    def test_list_float_or_bool_with_bad_mix_value(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Float | Bool], ('1.1', '2.2', 'peanut'))

    def test_set_float_or_bool_with_float_value(self):
        obs = parse_primitive(Set[Float | Bool], ('1.1', '2.2', '3.3'))
        self.assertEqual(obs, {1.1, 2.2, 3.3})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), float)

    def test_set_float_or_bool_with_bool_value(self):
        obs = parse_primitive(Set[Float | Bool], ('True', 'False', 'True'))
        self.assertEqual(obs, {True, False})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), bool)

    def test_list_float_or_str_with_float_value(self):
        obs = parse_primitive(List[Float | Str], ('1.1', '2.2', '3.3'))
        self.assertEqual(obs, [1.1, 2.2, 3.3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], float)

    def test_list_float_or_str_with_str_value(self):
        obs = parse_primitive(List[Float | Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, ['peanut', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)

    def test_list_float_or_str_with_mixed_value_variant_a(self):
        obs = parse_primitive(List[Float | Str], ('peanut', '2.2', '3.3'))
        self.assertEqual(obs, ['peanut', 2.2, 3.3])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)
        self.assertIsInstance(obs[1], float)

    def test_list_float_or_str_with_mixed_value_variant_b(self):
        obs = parse_primitive(List[Float | Str], ('1.1', '2.2', 'dog'))
        self.assertEqual(obs, [1.1, 2.2, 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], float)
        self.assertIsInstance(obs[-1], str)

    def test_set_float_or_str_with_float_value(self):
        obs = parse_primitive(Set[Float | Str], ('1.1', '2.2', '3.3'))
        self.assertEqual(obs, {1.1, 2.2, 3.3})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), float)

    def test_set_float_or_str_with_str_value(self):
        obs = parse_primitive(Set[Float | Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, {'peanut', 'the', 'dog'})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), str)

    def test_list_bool_or_str_with_bool_value(self):
        obs = parse_primitive(List[Bool | Str], ('True', 'False', 'True'))
        self.assertEqual(obs, [True, False, True])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], bool)

    def test_list_bool_or_str_with_str_value(self):
        obs = parse_primitive(List[Bool | Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, ['peanut', 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)

    def test_list_bool_or_str_with_mixed_value_variant_a(self):
        obs = parse_primitive(List[Bool | Str], ('True', 'the', 'dog'))
        self.assertEqual(obs, [True, 'the', 'dog'])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], bool)
        self.assertIsInstance(obs[-1], str)

    def test_list_bool_or_str_with_mixed_value_variant_b(self):
        obs = parse_primitive(List[Bool | Str], ('peanut', 'the', 'True'))
        self.assertEqual(obs, ['peanut', 'the', True])
        self.assertIsInstance(obs, list)
        self.assertIsInstance(obs[0], str)
        self.assertIsInstance(obs[-1], bool)

    def test_set_bool_or_str_with_bool_value(self):
        obs = parse_primitive(Set[Bool | Str], ('True', 'False', 'True'))
        self.assertEqual(obs, {True, False})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), bool)

    def test_set_bool_or_str_with_str_value(self):
        obs = parse_primitive(Set[Bool | Str], ('peanut', 'the', 'dog'))
        self.assertEqual(obs, {'peanut', 'the', 'dog'})
        self.assertIsInstance(obs, set)
        self.assertIsInstance(obs.pop(), str)
class TestParsePrimitiveCollectionsComplex(unittest.TestCase):
    """Unions of collections whose members are themselves unions."""

    def test_list_int_bool_or_list_float_with_bool_int_value(self):
        obs = parse_primitive(List[Int | Bool] | List[Float],
                              ('1', '2', 'True', 'False'))
        self.assertEqual(obs, [1, 2, True, False])

    def test_list_int_bool_or_list_float_with_float_value(self):
        obs = parse_primitive(List[Int | Bool] | List[Float],
                              ('1.1', '2.2', '3.3', '4.4'))
        self.assertEqual(obs, [1.1, 2.2, 3.3, 4.4])

    def test_list_int_bool_or_list_float_with_bad_value(self):
        # A float mixed into int/bool tokens matches neither alternative.
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Int | Bool] | List[Float],
                            ('1', '2.2', 'True', 'False'))

    def test_list_int_str_or_list_float_with_str_int_value(self):
        obs = parse_primitive(List[Int | Str] | List[Float],
                              ('1', '2', 'peanut', 'the'))
        self.assertEqual(obs, [1, 2, 'peanut', 'the'])

    def test_list_int_str_or_list_float_with_float_value(self):
        obs = parse_primitive(List[Int | Str] | List[Float],
                              ('1.1', '2.2', '3.3', '4.4'))
        self.assertEqual(obs, [1.1, 2.2, 3.3, 4.4])

    def test_list_int_str_or_list_float_str_with_float_value(self):
        obs = parse_primitive(List[Int | Str] | List[Float | Str],
                              ('1.1', '2.2', '3.3', '4.4'))
        self.assertEqual(obs, [1.1, 2.2, 3.3, 4.4])

    def test_list_int_str_or_list_float_str_bool_with_float_value(self):
        obs = parse_primitive(List[Int | Str] | List[Float | Str | Bool],
                              ('1.1', '2.2', '3.3', '4.4'))
        self.assertEqual(obs, [1.1, 2.2, 3.3, 4.4])

    def test_list_int_str_or_list_float_str_bool_with_float_str_value(self):
        obs = parse_primitive(List[Int | Str] | List[Float | Str | Bool],
                              ('1.1', '2.2', 'the', 'peanut'))
        self.assertEqual(obs, [1.1, 2.2, 'the', 'peanut'])

    def test_list_int_str_or_list_float_str_bool_with_float_bool_value(self):
        obs = parse_primitive(List[Int | Str] | List[Float | Str | Bool],
                              ('1.1', '2.2', 'True', 'False'))
        self.assertEqual(obs, [1.1, 2.2, True, False])

    def test_list_int_str_or_list_float_with_mixed_value(self):
        # The first alternative (Int | Str) wins: non-int tokens, including
        # '1.1' and 'True', are kept as strings rather than coerced.
        obs = parse_primitive(List[Int | Str] | List[Float],
                              ('1.1', '2', 'True', 'peanut'))
        self.assertEqual(obs, ['1.1', 2, 'True', 'peanut'])

    # NOTE(review): the next three test names say "list_str" but the code
    # exercises List[Float | Bool] | List[Int] — names look stale.
    def test_list_float_bool_or_list_str_with_float_bool_value(self):
        obs = parse_primitive(List[Float | Bool] | List[Int],
                              ('1', '2', 'True', 'False'))
        self.assertEqual(obs, [1, 2, True, False])

    def test_list_float_bool_or_list_str_with_int_value(self):
        obs = parse_primitive(List[Float | Bool] | List[Int],
                              ('1', '2', '3', '4'))
        self.assertEqual(obs, [1, 2, 3, 4])

    def test_list_float_bool_or_list_str_with_bad_value(self):
        with self.assertRaisesRegex(ValueError, 'Could not coerce'):
            parse_primitive(List[Float | Bool] | List[Int],
                            ('1', '2.2', 'True', 'peanut'))

    # NOTE(review): name says "list_float" but the expression uses Set[...].
    def test_set_int_bool_or_list_float_with_bool_int_value(self):
        obs = parse_primitive(Set[Int | Bool] | Set[Float],
                              ('1', '2', 'True', 'False'))
        self.assertEqual(obs, {1, 2, True, False})
# Allow running this test module directly (python test_file.py).
if __name__ == '__main__':
    unittest.main()
| 42.111825
| 79
| 0.619876
| 4,270
| 32,763
| 4.517799
| 0.023888
| 0.180395
| 0.207454
| 0.104505
| 0.942823
| 0.930849
| 0.900575
| 0.884765
| 0.864548
| 0.845109
| 0
| 0.022707
| 0.231145
| 32,763
| 777
| 80
| 42.166023
| 0.743112
| 0.016787
| 0
| 0.616747
| 0
| 0
| 0.059319
| 0
| 0
| 0
| 0
| 0
| 0.489533
| 1
| 0.21095
| false
| 0
| 0.003221
| 0
| 0.225443
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e0ebe32bde903e9a3c0b1621fff9a50dc1015bac
| 4,955
|
py
|
Python
|
routes/ocr.py
|
moonrailgun/ai-api
|
08a2216187d7c2ac1d689ac3137a5617349f5bb1
|
[
"MIT"
] | null | null | null |
routes/ocr.py
|
moonrailgun/ai-api
|
08a2216187d7c2ac1d689ac3137a5617349f5bb1
|
[
"MIT"
] | null | null | null |
routes/ocr.py
|
moonrailgun/ai-api
|
08a2216187d7c2ac1d689ac3137a5617349f5bb1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import request, jsonify
from flask.blueprints import Blueprint
from ai_clients import ocr_client
import base64
# Blueprint grouping all OCR endpoints; registered by the application setup.
ocr = Blueprint('ocr', __name__)
# File extensions accepted by the direct-upload OCR endpoints.
ALLOWED_EXTENSIONS = ['png', 'jpg', 'bmp']


def allowed_file(filename):
    """Return True if *filename* ends with an allowed image extension.

    Only the text after the last dot is considered, and the comparison is
    case-insensitive (previously 'photo.PNG' was rejected because the raw
    extension was compared against the lowercase whitelist).
    """
    return ('.' in filename
            and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS)
@ocr.route('/general/url', methods=['post'])
def general_url():
    """General OCR on an image fetched from a URL.

    JSON body: ``url`` (required) plus optional ``language_type``,
    ``detect_direction``, ``detect_language`` and ``probability`` options
    forwarded to the OCR client.  Responds with a JSON envelope
    ``{'result': bool, ...}``.
    """
    data = request.get_json()
    url = data.get('url')
    language_type = data.get('language_type', 'CHN_ENG')
    detect_direction = data.get('detect_direction', False)
    detect_language = data.get('detect_language', False)
    probability = data.get('probability', False)
    if not url:
        return jsonify({
            'result': False,
            'msg': u'缺少url',
        })
    res = ocr_client.basicGeneralUrl(url, {
        'language_type': language_type,
        'detect_direction': detect_direction,
        'detect_language': detect_language,
        'probability': probability,
    })
    # Consistency fix: every other route in this module surfaces upstream
    # OCR errors via error_code; this one previously returned result=True
    # even when the OCR API reported a failure.
    if res.get('error_code'):
        return jsonify({
            'result': False,
            'msg': res.get('error_msg', ''),
        })
    return jsonify({
        'result': True,
        'data': res,
    })
@ocr.route('/general/base64', methods=['post'])
def general_base64():
    """General OCR on a base64-encoded image supplied in the JSON body."""
    payload = request.get_json()
    base64str = payload.get('base64')
    if not base64str:
        return jsonify({
            'result': False,
            'msg': u'缺少base64',
        })
    # Strip a "data:image/...;base64," style prefix when present.
    if ',' in base64str:
        base64str = base64str.rsplit(',', 1)[1]
    # Decode the base64 payload into raw image bytes.
    image = base64.b64decode(base64str)
    options = {
        'language_type': payload.get('language_type', 'CHN_ENG'),
        'detect_direction': payload.get('detect_direction', False),
        'detect_language': payload.get('detect_language', False),
        'probability': payload.get('probability', False),
    }
    res = ocr_client.basicGeneral(image, options)
    if res.get('error_code'):
        return jsonify({
            'result': False,
            'msg': res.get('error_msg', ''),
        })
    return jsonify({
        'result': True,
        'data': res,
    })
@ocr.route('/general/image', methods=['post'])
def general_image():
    """General OCR on an uploaded image (multipart form field 'image').

    Optional form fields: ``language_type``, ``detect_direction``,
    ``detect_language``, ``probability``.  Responds with a JSON envelope
    ``{'result': bool, ...}``.
    """
    image = request.files.get('image')
    language_type = request.form.get('language_type', 'CHN_ENG')
    detect_direction = request.form.get('detect_direction', False)
    detect_language = request.form.get('detect_language', False)
    probability = request.form.get('probability', False)
    if not image:
        return jsonify({
            'result': False,
            'msg': u'缺少image',
        })
    # `image` is already known to be truthy here, so only the extension
    # needs checking (the original re-tested `image` redundantly).
    if not allowed_file(image.filename):
        return jsonify({
            'result': False,
            'msg': u'文件上传失败,只允许jpg/png/bmp',
        })
    # Read the upload into memory; it is deliberately not persisted to disk.
    data = image.read()
    res = ocr_client.basicGeneral(data, {
        'language_type': language_type,
        'detect_direction': detect_direction,
        'detect_language': detect_language,
        'probability': probability,
    })
    if res.get('error_code'):
        return jsonify({
            'result': False,
            'msg': res.get('error_msg', ''),
        })
    return jsonify({
        'result': True,
        'data': res,
    })
@ocr.route('/accurate/base64', methods=['post'])
def accurate_base64():
    """High-accuracy OCR on a base64-encoded image from the JSON body."""
    payload = request.get_json()
    base64str = payload.get('base64')
    if not base64str:
        return jsonify({
            'result': False,
            'msg': u'缺少base64',
        })
    # Drop any "data:...;base64," prefix before decoding.
    if ',' in base64str:
        base64str = base64str.rsplit(',', 1)[1]
    image = base64.b64decode(base64str)
    options = {
        'detect_direction': payload.get('detect_direction', False),
        'probability': payload.get('probability', False),
    }
    res = ocr_client.basicAccurate(image, options)
    if res.get('error_code'):
        return jsonify({
            'result': False,
            'msg': res.get('error_msg', ''),
        })
    return jsonify({
        'result': True,
        'data': res,
    })
@ocr.route('/accurate/image', methods=['post'])
def accurate_image():
    """High-accuracy OCR on an uploaded image (multipart field 'image')."""
    image = request.files.get('image')
    detect_direction = request.form.get('detect_direction', False)
    probability = request.form.get('probability', False)
    if not image:
        return jsonify({
            'result': False,
            'msg': u'缺少image',
        })
    # NOTE(review): `image` is already truthy here; the `image and` part of
    # this check is redundant.
    if not(image and allowed_file(image.filename)):
        return jsonify({
            'result': False,
            'msg': u'文件上传失败,只允许jpg/png/bmp',
        })
    # Read the upload into memory without persisting it to disk.
    data = image.read()
    res = ocr_client.basicAccurate(data, {
        'detect_direction': detect_direction,
        'probability': probability,
    })
    # Propagate upstream OCR API errors as a failure envelope.
    if res.get('error_code'):
        return jsonify({
            'result': False,
            'msg': res.get('error_msg', ''),
        })
    return jsonify({
        'result': True,
        'data': res,
    })
| 26.497326
| 78
| 0.582846
| 520
| 4,955
| 5.401923
| 0.136538
| 0.1068
| 0.108224
| 0.093984
| 0.802421
| 0.796369
| 0.760057
| 0.739409
| 0.706301
| 0.655037
| 0
| 0.016869
| 0.270232
| 4,955
| 186
| 79
| 26.639785
| 0.759956
| 0.009889
| 0
| 0.81457
| 0
| 0
| 0.182857
| 0.008571
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039735
| false
| 0
| 0.02649
| 0.006623
| 0.178808
| 0.013245
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1cad3d58b3e74875c6b7cb12122a4a5cc5bde161
| 3,377
|
py
|
Python
|
gmr/tests/test_sklearn.py
|
danielchen-pyc/gmr
|
6d87623cbc642fbdfab4044ed40cbabed12f3986
|
[
"BSD-3-Clause"
] | 99
|
2015-02-25T12:25:14.000Z
|
2022-02-11T15:27:29.000Z
|
gmr/tests/test_sklearn.py
|
danielchen-pyc/gmr
|
6d87623cbc642fbdfab4044ed40cbabed12f3986
|
[
"BSD-3-Clause"
] | 29
|
2015-04-30T23:37:14.000Z
|
2022-02-04T20:50:11.000Z
|
gmr/tests/test_sklearn.py
|
danielchen-pyc/gmr
|
6d87623cbc642fbdfab4044ed40cbabed12f3986
|
[
"BSD-3-Clause"
] | 37
|
2015-02-25T12:25:14.000Z
|
2021-12-15T08:00:17.000Z
|
import numpy as np
from numpy.testing import assert_array_almost_equal
from gmr.utils import check_random_state
from nose.tools import assert_less, assert_greater
from nose.plugins.skip import SkipTest
def test_sklearn_regression():
"""Test regression with GaussianMixtureRegressor."""
try:
from gmr.sklearn import GaussianMixtureRegressor
except ImportError:
raise SkipTest("sklearn is not available")
random_state = check_random_state(0)
n_samples = 200
x = np.linspace(0, 2, n_samples)[:, np.newaxis]
y1 = 3 * x[:n_samples // 2] + 1
y2 = -3 * x[n_samples // 2:] + 7
noise = random_state.randn(n_samples, 1) * 0.01
y = np.vstack((y1, y2)) + noise
gmr = GaussianMixtureRegressor(n_components=2, random_state=random_state)
gmr.fit(x, y)
assert_array_almost_equal(gmr.gmm_.priors, 0.5 * np.ones(2), decimal=2)
assert_array_almost_equal(gmr.gmm_.means[0], np.array([0.5, 2.5]), decimal=2)
assert_array_almost_equal(gmr.gmm_.means[1], np.array([1.5, 2.5]), decimal=1)
pred = gmr.predict(x)
mse = np.sum((y - pred) ** 2) / n_samples
assert_less(mse, 0.01)
def test_sklearn_regression_with_2d_input():
"""Test regression with GaussianMixtureRegressor and two-dimensional input."""
try:
from gmr.sklearn import GaussianMixtureRegressor
except ImportError:
raise SkipTest("sklearn is not available")
random_state = check_random_state(0)
n_samples = 200
x = np.linspace(0, 2, n_samples)[:, np.newaxis]
y1 = 3 * x[:n_samples // 2] + 1
y2 = -3 * x[n_samples // 2:] + 7
noise = random_state.randn(n_samples, 1) * 0.01
y = np.vstack((y1, y2)) + noise
gmr = GaussianMixtureRegressor(n_components=2, random_state=random_state)
gmr.fit(x, y)
pred = gmr.predict(x)
mse = np.sum((y - pred) ** 2) / n_samples
assert_less(mse, 0.01)
def test_sklearn_regression_with_1d_output():
"""Test regression with GaussianMixtureRegressor and two-dimensional input."""
try:
from gmr.sklearn import GaussianMixtureRegressor
except ImportError:
raise SkipTest("sklearn is not available")
random_state = check_random_state(0)
n_samples = 200
x = np.linspace(0, 2, n_samples)[:, np.newaxis]
y = 3 * x + 1
y = y.flatten()
gmr = GaussianMixtureRegressor(n_components=1, random_state=random_state)
gmr.fit(x, y)
pred = gmr.predict(x)
mse = np.sum((y - pred) ** 2) / n_samples
assert_greater(mse, 0.01)
def test_sklearn_regression_without_noise():
"""Test regression without noise."""
try:
from gmr.sklearn import GaussianMixtureRegressor
except ImportError:
raise SkipTest("sklearn is not available")
random_state = 0
n_samples = 200
x = np.linspace(0, 2, n_samples)[:, np.newaxis]
y1 = 3 * x[:n_samples // 2] + 1
y2 = -3 * x[n_samples // 2:] + 7
y = np.vstack((y1, y2))
gmr = GaussianMixtureRegressor(n_components=2, random_state=random_state)
gmr.fit(x, y)
assert_array_almost_equal(gmr.gmm_.priors, 0.5 * np.ones(2), decimal=2)
assert_array_almost_equal(gmr.gmm_.means[0], np.array([1.5, 2.5]), decimal=2)
assert_array_almost_equal(gmr.gmm_.means[1], np.array([0.5, 2.5]), decimal=1)
pred = gmr.predict(x)
mse = np.sum((y - pred) ** 2) / n_samples
assert_less(mse, 0.01)
| 32.161905
| 82
| 0.668937
| 502
| 3,377
| 4.310757
| 0.14741
| 0.073937
| 0.033272
| 0.071165
| 0.840111
| 0.834104
| 0.834104
| 0.818392
| 0.818392
| 0.818392
| 0
| 0.043834
| 0.202843
| 3,377
| 104
| 83
| 32.471154
| 0.76003
| 0.066035
| 0
| 0.75
| 0
| 0
| 0.030642
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.052632
| false
| 0
| 0.171053
| 0
| 0.223684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e80c070fae5a6faf2f73bd135663726cf653b4dc
| 1,849
|
py
|
Python
|
pytest_strings/__init__.py
|
eddwardo/pytest_strings
|
f73f1ce3ef823a05674d43160287f3afe7d6d3b7
|
[
"MIT"
] | null | null | null |
pytest_strings/__init__.py
|
eddwardo/pytest_strings
|
f73f1ce3ef823a05674d43160287f3afe7d6d3b7
|
[
"MIT"
] | 1
|
2017-04-30T20:12:36.000Z
|
2017-04-30T20:12:36.000Z
|
pytest_strings/__init__.py
|
eddwardo/pytest_strings
|
f73f1ce3ef823a05674d43160287f3afe7d6d3b7
|
[
"MIT"
] | null | null | null |
import pytest
from pytest_strings import strings
# Parametrized fixtures exposing the test-string catalogues from
# `pytest_strings.strings`.  Each fixture yields one string per test run;
# `params` is evaluated at import time, so the catalogues must be available
# when this module loads.  Fixture names are public API — do not rename.

@pytest.fixture(params=strings.noughty_string())
def noughty_string(request):
    # Full "naughty strings"-style catalogue (sic: name kept as published).
    return request.param


@pytest.fixture(params=strings.types['number'])
def number_string(request):
    return request.param


@pytest.fixture(params=strings.types['null'])
def null_strings(request):
    # NOTE(review): only fixture with a plural name; kept for compatibility.
    return request.param


@pytest.fixture(params=strings.types['escape'])
def escape_string(request):
    return request.param


@pytest.fixture(params=strings.types['truth'])
def truth_string(request):
    return request.param


@pytest.fixture(params=strings.types['false'])
def false_string(request):
    return request.param


# Combined catalogues are concatenated lists of the underlying types.
@pytest.fixture(params=strings.types['truth'] + strings.types['false'])
def bool_string(request):
    return request.param


@pytest.fixture(params=strings.types['emoji'])
def emoji_string(request):
    return request.param


@pytest.fixture(params=strings.types['asian'])
def asian_string(request):
    return request.param


@pytest.fixture(params=strings.types['arabic'])
def arabic_string(request):
    return request.param


@pytest.fixture(params=strings.types['arabic'] + strings.types['asian'])
def nonlatin_string(request):
    return request.param


# Injection/attack payload catalogues.
@pytest.fixture(params=strings.types['xss'])
def xss_string(request):
    return request.param


@pytest.fixture(params=strings.types['eval_injection'])
def eval_injection_string(request):
    return request.param


@pytest.fixture(params=strings.types['sql_injection'])
def sql_injection_string(request):
    return request.param


@pytest.fixture(params=strings.types['simple_ascii'])
def simple_ascii_string(request):
    return request.param


@pytest.fixture(params=strings.types['html'])
def html_string(request):
    return request.param


@pytest.fixture(params=strings.types['utf'])
def utf_string(request):
    return request.param
| 26.042254
| 72
| 0.768524
| 240
| 1,849
| 5.816667
| 0.129167
| 0.154728
| 0.231375
| 0.316619
| 0.757163
| 0.734957
| 0.734957
| 0.734957
| 0.734957
| 0.694842
| 0
| 0
| 0.09735
| 1,849
| 70
| 73
| 26.414286
| 0.836429
| 0
| 0
| 0.320755
| 0
| 0
| 0.060573
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.320755
| false
| 0
| 0.037736
| 0.320755
| 0.679245
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
1c432aaff07554254b56f50f567f20d8c2595cdc
| 7,611
|
py
|
Python
|
readability_transformers/features/lf/Syntactic/PhrF.py
|
OneTheta/readability-transformers
|
3c122c98a90c67add8eafad16563b269d5e3124a
|
[
"Apache-2.0"
] | 1
|
2022-01-26T10:55:59.000Z
|
2022-01-26T10:55:59.000Z
|
readability_transformers/features/lf/Syntactic/PhrF.py
|
OneTheta/readability-transformers
|
3c122c98a90c67add8eafad16563b269d5e3124a
|
[
"Apache-2.0"
] | null | null | null |
readability_transformers/features/lf/Syntactic/PhrF.py
|
OneTheta/readability-transformers
|
3c122c98a90c67add8eafad16563b269d5e3124a
|
[
"Apache-2.0"
] | 2
|
2021-10-14T22:53:57.000Z
|
2022-01-26T10:53:32.000Z
|
# -*- coding: UTF-8 -*-
"""
Software: LingFeat - Comprehensive Linguistic Features for Readability Assessment
Page: PhrF.py (Phrasal Features)
License: CC-BY-SA 4.0
Original Author: Bruce W. Lee (이웅성) @brucewlee
Affiliation 1: LXPER AI, Seoul, South Korea
Affiliation 2: University of Pennsylvania, PA, USA
Contributing Author: -
Affiliation : -
References:
>>> Phrasal features inspired by
Publication 1: Feng, Lijun, Martin Jansche, Matt Huenerfauth, and Noémie Elhadad. "A Comparison of Features for Automatic Readability Assessment." In Coling 2010: Posters, pp. 276-284. 2010.
Publication 2: Lu, Xiaofei. "Automatic analysis of syntactic complexity in second language writing." International journal of corpus linguistics 15, no. 4 (2010): 474-496.
"""
from ..utils import division
def retrieve(SuPar, sent_token_list, n_token, n_sent):
    """Compute phrasal-complexity features from constituency parses.

    Parses each tokenized sentence with *SuPar*, tallies occurrences of six
    phrase labels (NP, VP, SBAR, PP, ADJP, ADVP) in the string form of the
    parse trees, and returns, per label: the total count ("to_"), the count
    per sentence ("as_"), the count per token ("at_"), and the ratio against
    every other label's count ("ra_"). All quotients go through the shared
    `division` helper.
    """
    # Order is significant: it fixes both the insertion order of the result
    # dict and the pairing order of the ratio features.
    tags = ("NP", "VP", "SBAR", "PP", "ADJP", "ADVP")
    abbrs = ("No", "Ve", "Su", "Pr", "Aj", "Av")

    counts = dict.fromkeys(tags, 0)
    for tokens in sent_token_list:
        parsed = SuPar.predict([tokens], prob=True, verbose=False)
        tree_text = str(parsed.sentences)
        # NOTE(review): plain substring counting — every "ADVP" occurrence
        # also matches "VP", so VP totals include ADVP hits. Kept as-is to
        # preserve the original feature values; confirm before "fixing".
        for tag in tags:
            counts[tag] += tree_text.count(tag)

    result = {}
    for abbr, tag in zip(abbrs, tags):
        total = counts[tag]
        result["to_%sPhr_C" % abbr] = total
        result["as_%sPhr_C" % abbr] = float(division(total, n_sent))
        result["at_%sPhr_C" % abbr] = float(division(total, n_token))
        for other_abbr, other_tag in zip(abbrs, tags):
            if other_abbr == abbr:
                continue
            key = "ra_%s%sP_C" % (abbr, other_abbr)
            result[key] = float(division(total, counts[other_tag]))
    return result
def retrieve_supar_optimized(dataset_list, sent_token_list, n_token, n_sent):
    """Compute the same phrasal features as `retrieve` from pre-parsed data.

    Instead of invoking the parser per sentence, reads the i-th pre-computed
    parse from *dataset_list* for the i-th sentence in *sent_token_list*.
    Returns the identical feature dictionary: per phrase label the total
    count ("to_"), per-sentence count ("as_"), per-token count ("at_"), and
    ratios against every other label ("ra_").
    """
    # Order is significant: it fixes both the insertion order of the result
    # dict and the pairing order of the ratio features.
    tags = ("NP", "VP", "SBAR", "PP", "ADJP", "ADVP")
    abbrs = ("No", "Ve", "Su", "Pr", "Aj", "Av")

    counts = dict.fromkeys(tags, 0)
    # Index-based access preserves the original behavior (IndexError) when
    # dataset_list is shorter than sent_token_list.
    for idx in range(len(sent_token_list)):
        tree_text = str(dataset_list[idx].sentences)
        # NOTE(review): plain substring counting — every "ADVP" occurrence
        # also matches "VP", so VP totals include ADVP hits. Kept as-is to
        # preserve the original feature values; confirm before "fixing".
        for tag in tags:
            counts[tag] += tree_text.count(tag)

    result = {}
    for abbr, tag in zip(abbrs, tags):
        total = counts[tag]
        result["to_%sPhr_C" % abbr] = total
        result["as_%sPhr_C" % abbr] = float(division(total, n_sent))
        result["at_%sPhr_C" % abbr] = float(division(total, n_token))
        for other_abbr, other_tag in zip(abbrs, tags):
            if other_abbr == abbr:
                continue
            key = "ra_%s%sP_C" % (abbr, other_abbr)
            result[key] = float(division(total, counts[other_tag]))
    return result
| 46.127273
| 190
| 0.681119
| 1,275
| 7,611
| 3.590588
| 0.120784
| 0.110092
| 0.256881
| 0.293578
| 0.831804
| 0.831804
| 0.831804
| 0.831804
| 0.831804
| 0.831804
| 0
| 0.007417
| 0.185127
| 7,611
| 165
| 191
| 46.127273
| 0.730732
| 0.098673
| 0
| 0.933333
| 0
| 0
| 0.145338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014815
| false
| 0
| 0.007407
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
98ce60036737aecc10ca2fffea28a7fbdb899987
| 192
|
py
|
Python
|
code/exp.py
|
campfireman/bachelor-thesis
|
1d78b0a90d352af916ee7eab1fc2539362586255
|
[
"MIT"
] | null | null | null |
code/exp.py
|
campfireman/bachelor-thesis
|
1d78b0a90d352af916ee7eab1fc2539362586255
|
[
"MIT"
] | null | null | null |
code/exp.py
|
campfireman/bachelor-thesis
|
1d78b0a90d352af916ee7eab1fc2539362586255
|
[
"MIT"
] | null | null | null |
# Experiment launcher: exactly one of the imports below is active and selects
# which experiment's main() runs. Swap the comment markers to switch.
# from src.experiments.read_old_checkpoints import main
# from src.experiments.generate_experience import main
from src.experiments.warm_up import main

# Run the selected experiment only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 24
| 55
| 0.791667
| 26
| 192
| 5.384615
| 0.538462
| 0.15
| 0.385714
| 0.242857
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130208
| 192
| 7
| 56
| 27.428571
| 0.838323
| 0.552083
| 0
| 0
| 1
| 0
| 0.096386
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
98e23b0dbfe92b4cd12b144833f3f7ff954088f1
| 3,307
|
py
|
Python
|
tests/file_io/sqlite_blob_file_io.py
|
Acidburn0zzz/dfvfs
|
3db8c4e520e3e7527faffeea8f52187c861fa3b6
|
[
"Apache-2.0"
] | 1
|
2019-08-28T23:47:16.000Z
|
2019-08-28T23:47:16.000Z
|
tests/file_io/sqlite_blob_file_io.py
|
Acidburn0zzz/dfvfs
|
3db8c4e520e3e7527faffeea8f52187c861fa3b6
|
[
"Apache-2.0"
] | null | null | null |
tests/file_io/sqlite_blob_file_io.py
|
Acidburn0zzz/dfvfs
|
3db8c4e520e3e7527faffeea8f52187c861fa3b6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the SQLite blob file-like object."""
from __future__ import unicode_literals
import unittest
from dfvfs.file_io import sqlite_blob_file_io
from dfvfs.path import sqlite_blob_path_spec
from dfvfs.path import os_path_spec
from dfvfs.resolver import context
from tests.file_io import test_lib
class SQLiteBlobFileWithConditionTest(test_lib.SylogTestCase):
  """Tests the SQLite blob file-like object selected by a row condition."""

  def setUp(self):
    """Builds the path specification chain used by every test."""
    self._resolver_context = context.Context()
    test_file = self._GetTestFilePath(['syslog.db'])
    self._SkipIfPathNotExists(test_file)

    os_spec = os_path_spec.OSPathSpec(location=test_file)
    self._sqlite_blob_path_spec = sqlite_blob_path_spec.SQLiteBlobPathSpec(
        table_name='blobs', column_name='blob',
        row_condition=('identifier', '==', 'myblob'), parent=os_spec)

  def _OpenBlobFile(self):
    """Opens and returns a blob file object for the test path specification."""
    blob_file = sqlite_blob_file_io.SQLiteBlobFile(self._resolver_context)
    blob_file.open(path_spec=self._sqlite_blob_path_spec)
    return blob_file

  def testOpenClosePathSpec(self):
    """Test the open and close functionality using a path specification."""
    blob_file = self._OpenBlobFile()
    self._TestGetSizeFileObject(blob_file)
    blob_file.close()

  def testSeek(self):
    """Test the seek functionality."""
    blob_file = self._OpenBlobFile()
    self._TestSeekFileObject(blob_file)
    blob_file.close()

  def testRead(self):
    """Test the read functionality."""
    blob_file = self._OpenBlobFile()
    self._TestReadFileObject(blob_file)
    blob_file.close()
class SQLiteBlobFileWithIndexTest(test_lib.SylogTestCase):
  """Tests the SQLite blob file-like object selected by a row index."""

  def setUp(self):
    """Builds the path specification chain used by every test."""
    self._resolver_context = context.Context()
    test_file = self._GetTestFilePath(['syslog.db'])
    self._SkipIfPathNotExists(test_file)

    os_spec = os_path_spec.OSPathSpec(location=test_file)
    self._sqlite_blob_path_spec = sqlite_blob_path_spec.SQLiteBlobPathSpec(
        table_name='blobs', column_name='blob', row_index=0, parent=os_spec)

  def _OpenBlobFile(self):
    """Opens and returns a blob file object for the test path specification."""
    blob_file = sqlite_blob_file_io.SQLiteBlobFile(self._resolver_context)
    blob_file.open(path_spec=self._sqlite_blob_path_spec)
    return blob_file

  def testOpenClosePathSpec(self):
    """Test the open and close functionality using a path specification."""
    blob_file = self._OpenBlobFile()
    self._TestGetSizeFileObject(blob_file)
    blob_file.close()

  def testSeek(self):
    """Test the seek functionality."""
    blob_file = self._OpenBlobFile()
    self._TestSeekFileObject(blob_file)
    blob_file.close()

  def testRead(self):
    """Test the read functionality."""
    blob_file = self._OpenBlobFile()
    self._TestReadFileObject(blob_file)
    blob_file.close()
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| 32.421569
| 78
| 0.762927
| 432
| 3,307
| 5.474537
| 0.185185
| 0.081184
| 0.060888
| 0.083721
| 0.840592
| 0.830444
| 0.830444
| 0.830444
| 0.830444
| 0.830444
| 0
| 0.000703
| 0.139401
| 3,307
| 101
| 79
| 32.742574
| 0.830288
| 0.174176
| 0
| 0.75
| 0
| 0
| 0.023186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.125
| 0
| 0.303571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.