| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
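Each `qsc_*` signal appears twice in the schema: a `*_quality_signal` column holding the measured value, and an unsuffixed column holding an integer flag (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are typed `null`). In every complete row below, `hits` equals the number of unsuffixed flags set to 1, which suggests the flags mark signals that tripped a quality filter; `effective` is `0` for all rows shown. The per-record tables that follow therefore list each signal once, with its measured value and its flag side by side. A minimal sketch of the `hits` relationship (plain Python; the inlined row subset is illustrative, not a dataset API):

```python
# Count quality-filter "hits" for one row: sum the unsuffixed flag
# columns, skipping the two null-typed ones. Subset of the first row below.
row = {
    "qsc_code_frac_chars_top_2grams": 1,   # flagged
    "qsc_code_frac_chars_dupe_5grams": 1,  # flagged
    "qsc_code_frac_words_unique": None,    # null-typed column
    "qsc_codepython_frac_lines_print": 0,  # not flagged
}

hits = sum(v for v in row.values() if isinstance(v, int))
print(hits)  # 2 for this subset; the first full row below has hits = 8
```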
| field | value |
|---|---|
| hexsha | 4b363d071b7b96e4d83f123f29e5e591eea11591 |
| size | 2977 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/features/test_pipeline_loading.py |
| max_stars_repo_name | albact7/MLBlocks |
| max_stars_repo_head_hexsha | e555e2740f8f316b438983f15b620bcfb54fb838 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 78 |
| max_stars_repo_stars_event_min_datetime | 2018-06-06T02:34:18.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-09-16T15:27:21.000Z |
| max_issues_repo_path | tests/features/test_pipeline_loading.py |
| max_issues_repo_name | albact7/MLBlocks |
| max_issues_repo_head_hexsha | e555e2740f8f316b438983f15b620bcfb54fb838 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 92 |
| max_issues_repo_issues_event_min_datetime | 2018-05-17T16:27:18.000Z |
| max_issues_repo_issues_event_max_datetime | 2020-09-16T13:50:04.000Z |
| max_forks_repo_path | tests/features/test_pipeline_loading.py |
| max_forks_repo_name | albact7/MLBlocks |
| max_forks_repo_head_hexsha | e555e2740f8f316b438983f15b620bcfb54fb838 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 32 |
| max_forks_repo_forks_event_min_datetime | 2018-06-15T01:59:55.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-08-06T16:04:35.000Z |

content:

```python
from unittest import TestCase
from mlblocks import MLPipeline
class TestMLPipeline(TestCase):
def test_dict(self):
pipeline_dict = {
'primitives': [
'sklearn.ensemble.RandomForestClassifier'
],
'init_params': {
'sklearn.ensemble.RandomForest#1': {
'n_estimators': 500
}
},
'input_names': {
'sklearn.ensemble.RandomForest#1': {
'X': 'X1'
}
},
'output_names': {
'sklearn.ensemble.RandomForest#1': {
'y': 'y1'
}
}
}
pipeline = MLPipeline(pipeline_dict)
assert pipeline.primitives == ['sklearn.ensemble.RandomForestClassifier']
assert pipeline.init_params == {
'sklearn.ensemble.RandomForest#1': {
'n_estimators': 500
}
}
assert pipeline.input_names == {
'sklearn.ensemble.RandomForest#1': {
'X': 'X1'
}
}
assert pipeline.output_names == {
'sklearn.ensemble.RandomForest#1': {
'y': 'y1'
}
}
def test_list(self):
primitives = [
'sklearn.ensemble.RandomForestClassifier'
]
init_params = {
'sklearn.ensemble.RandomForest#1': {
'n_estimators': 500
}
}
pipeline = MLPipeline(primitives, init_params=init_params)
assert pipeline.primitives == ['sklearn.ensemble.RandomForestClassifier']
assert pipeline.init_params == {
'sklearn.ensemble.RandomForest#1': {
'n_estimators': 500
}
}
def test_none(self):
primitives = [
'sklearn.ensemble.RandomForestClassifier'
]
init_params = {
'sklearn.ensemble.RandomForest#1': {
'n_estimators': 500
}
}
pipeline = MLPipeline(primitives=primitives, init_params=init_params)
assert pipeline.primitives == ['sklearn.ensemble.RandomForestClassifier']
assert pipeline.init_params == {
'sklearn.ensemble.RandomForest#1': {
'n_estimators': 500
}
}
def test_mlpipeline(self):
primitives = [
'sklearn.ensemble.RandomForestClassifier'
]
init_params = {
'sklearn.ensemble.RandomForest#1': {
'n_estimators': 500
}
}
pipeline = MLPipeline(primitives=primitives, init_params=init_params)
pipeline2 = MLPipeline(pipeline)
assert pipeline2.primitives == ['sklearn.ensemble.RandomForestClassifier']
assert pipeline2.init_params == {
'sklearn.ensemble.RandomForest#1': {
'n_estimators': 500
}
}
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 28.084906 | 82 | 0.507894 |

| signal | `*_quality_signal` value | flag |
|---|---|---|
| qsc_code_num_words | 215 | 0 |
| qsc_code_num_chars | 2977 | 0 |
| qsc_code_mean_word_length | 6.883721 | 0 |
| qsc_code_frac_words_unique | 0.162791 | null |
| qsc_code_frac_chars_top_2grams | 0.202703 | 1 |
| qsc_code_frac_chars_top_3grams | 0.218919 | 1 |
| qsc_code_frac_chars_top_4grams | 0.227027 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.836486 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.800676 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.800676 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.800676 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.688514 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.653378 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.023678 | 0 |
| qsc_code_frac_chars_whitespace | 0.38999 | 0 |
| qsc_code_size_file_byte | 2977 | 0 |
| qsc_code_num_lines | 105 | 0 |
| qsc_code_num_chars_line_max | 83 | 0 |
| qsc_code_num_chars_line_mean | 28.352381 | 0 |
| qsc_code_frac_chars_alphabet | 0.7913 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.466667 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.28082 | 0 |
| qsc_code_frac_chars_long_word_length | 0.229762 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.111111 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.044444 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.022222 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.077778 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| effective | hits |
|---|---|
| 0 | 8 |
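The test file above exercises four equivalent ways of constructing an MLBlocks `MLPipeline`: a dict spec, a positional primitives list, keyword arguments, and copying from another pipeline. A minimal sketch of that equivalence, assuming `mlblocks` is installed (the spec here is reduced from the tests above):

```python
from mlblocks import MLPipeline

spec = {'primitives': ['sklearn.ensemble.RandomForestClassifier']}

p1 = MLPipeline(spec)                            # dict spec
p2 = MLPipeline(spec['primitives'])              # positional list
p3 = MLPipeline(primitives=spec['primitives'])   # keyword argument
p4 = MLPipeline(p1)                              # copy from another pipeline

assert p1.primitives == p2.primitives == p3.primitives == p4.primitives
```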
| field | value |
|---|---|
| hexsha | 4b51baad578d8a20ad0d8f5c7db7a9e21e526f69 |
| size | 9887 |
| ext | py |
| lang | Python |
| max_stars_repo_path | assertpy/base.py |
| max_stars_repo_name | rascaler/assertpy |
| max_stars_repo_head_hexsha | 668719d3f034475d95de0f0a0cb680c7cfa8b43c |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | assertpy/base.py |
| max_issues_repo_name | rascaler/assertpy |
| max_issues_repo_head_hexsha | 668719d3f034475d95de0f0a0cb680c7cfa8b43c |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | assertpy/base.py |
| max_forks_repo_name | rascaler/assertpy |
| max_forks_repo_head_hexsha | 668719d3f034475d95de0f0a0cb680c7cfa8b43c |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod, ABC
class ExceptionConvertor(metaclass=ABCMeta):
@abstractmethod
def get_exception(self, obj):
pass
class Comparator(metaclass=ABCMeta):
@abstractmethod
def compare(self, left, right) -> int:
pass
class Assert(metaclass=ABCMeta):
@abstractmethod
def is_none(self):
pass
@abstractmethod
def is_not_none(self):
pass
@abstractmethod
def is_equal_to(self, expected):
pass
@abstractmethod
def is_in(self, values):
pass
@abstractmethod
def is_not_in(self, values):
pass
@abstractmethod
def then_fail_throw(self, obj, format_msg, arguments):
pass
class ComparableAssert(metaclass=ABCMeta):
@abstractmethod
def is_equal_to(self, expected):
pass
@abstractmethod
def is_less_than(self, boundary):
pass
@abstractmethod
def is_less_than_or_equal_to(self, boundary):
pass
@abstractmethod
def is_greater_than(self, boundary):
pass
@abstractmethod
def is_greater_than_or_equal_to(self, boundary):
pass
@abstractmethod
def is_between(self, start_inclusive_boundary, end_inclusive_boundary):
pass
@abstractmethod
def is_strictly_between(self, start_exclusive_boundary, end_exclusive_boundary):
pass
@abstractmethod
def is_start_inclusive_between(self, start_inclusive_boundary, end_exclusive_boundary):
pass
@abstractmethod
def is_end_inclusive_between(self, start_exclusive_boundary, end_inclusive_boundary):
pass
class SizeComparableAssert(metaclass=ABCMeta):
@abstractmethod
def has_one_size(self):
pass
@abstractmethod
def has_more_than_one_size(self):
pass
@abstractmethod
def is_size_equal_to(self, boundary):
pass
@abstractmethod
def is_size_less_than(self, boundary):
pass
@abstractmethod
def is_size_less_than_or_equal_to(self, boundary):
pass
@abstractmethod
def is_size_greater_than(self, boundary):
pass
@abstractmethod
def is_size_greater_than_or_equal_to(self, boundary):
pass
@abstractmethod
def is_size_between(self, start_inclusive_boundary, end_inclusive_boundary):
pass
@abstractmethod
def is_size_strictly_between(self, start_exclusive_boundary, end_exclusive_boundary):
pass
@abstractmethod
def is_size_start_inclusive_between(self, start_inclusive_boundary, end_exclusive_boundary):
pass
@abstractmethod
def is_size_end_inclusive_between(self, start_exclusive_boundary, end_inclusive_boundary):
pass
class AbstractAssert(Assert):
_exception_mapping = {}
def __init__(self, actual) -> None:
self.log = None
self.actual = actual
self.passed = True
def is_none(self):
if not self.passed:
return self
self.passed = self.actual is None
return self
def is_not_none(self):
if not self.passed:
return self
self.passed = self.actual is not None
return self
def is_equal_to(self, expected):
if not self.passed:
return self
self.passed = self.actual == expected
return self
def is_not_equal_to(self, expected):
if not self.passed:
return self
self.passed = not (self.actual == expected)
return self
def is_in(self, values):
if not self.passed:
return self
for value in values:
if self.actual == value:
self.passed = True
return self
self.passed = False
return self
def is_not_in(self, values):
if not self.passed:
return self
for value in values:
if self.actual == value:
self.passed = False
return self
self.passed = True
return self
def then_fail_throw(self, obj, format_msg=None, arguments=None):
if self.passed:
return self
self._write_custom_log(format_msg, arguments)
if isinstance(obj, Exception):
raise obj
convertor = AbstractAssert._exception_mapping[type(obj)]
raise convertor.get_exception(obj)
    def _write_custom_log(self, format_msg=None, arguments=None):
        if not format_msg:
            return
        if not arguments:
            # No substitution arguments: log the message as-is and stop,
            # otherwise the `%` formatting below would raise.
            print(format_msg)
            return
        print(format_msg % arguments)
@staticmethod
def add_exception_convertor(code_type, convertor):
if code_type in AbstractAssert._exception_mapping:
            raise Exception('convertor for %s already exists' % code_type)
AbstractAssert._exception_mapping[code_type] = convertor
def get_result(self):
return self.passed
class AbstractComparableAssert(ComparableAssert, AbstractAssert, Comparator, ABC):
def __init__(self, actual):
super(AbstractComparableAssert, self).__init__(actual)
def is_equal_to(self, expected):
if not self.passed:
return self
self.passed = self.actual == expected
return self
def is_less_than(self, boundary):
if not self.passed:
return self
self.passed = self.compare(self.actual, boundary) < 0
return self
def is_less_than_or_equal_to(self, boundary):
if not self.passed:
return self
self.passed = self.compare(self.actual, boundary) <= 0
return self
def is_greater_than(self, boundary):
if not self.passed:
return self
self.passed = self.compare(self.actual, boundary) > 0
return self
def is_greater_than_or_equal_to(self, boundary):
if not self.passed:
return self
self.passed = self.compare(self.actual, boundary) >= 0
return self
def is_between(self, start_inclusive_boundary, end_inclusive_boundary):
if not self.passed:
return self
self.passed = self.compare(self.actual, start_inclusive_boundary) >= 0 and self.compare(self.actual, end_inclusive_boundary) <= 0
return self
def is_strictly_between(self, start_exclusive_boundary, end_exclusive_boundary):
if not self.passed:
return self
self.passed = self.compare(self.actual, start_exclusive_boundary) > 0 and self.compare(self.actual, end_exclusive_boundary) < 0
return self
def is_start_inclusive_between(self, start_inclusive_boundary, end_exclusive_boundary):
if not self.passed:
return self
self.passed = self.compare(self.actual, start_inclusive_boundary) >= 0 and self.compare(self.actual, end_exclusive_boundary) < 0
return self
def is_end_inclusive_between(self, start_exclusive_boundary, end_inclusive_boundary):
if not self.passed:
return self
self.passed = self.compare(self.actual, start_exclusive_boundary) > 0 and self.compare(self.actual, end_inclusive_boundary) <= 0
return self
class AbstractSizeComparableAssert(SizeComparableAssert, AbstractAssert, Comparator, ABC):
def __init__(self, actual):
super(AbstractSizeComparableAssert, self).__init__(actual)
@abstractmethod
def size(self):
pass
def has_one_size(self):
if not self.passed:
return self
self.passed = self.size() == 1
return self
def has_more_than_one_size(self):
if not self.passed:
return self
self.passed = self.size() > 1
return self
def is_size_equal_to(self, other):
if not self.passed:
return self
self.passed = self.size() == other
return self
def is_size_less_than(self, other):
if not self.passed:
return self
self.passed = self.compare(self.size(), other) < 0
return self
def is_size_less_than_or_equal_to(self, other):
if not self.passed:
return self
self.passed = self.compare(self.size(), other) <= 0
return self
def is_size_greater_than(self, other):
if not self.passed:
return self
self.passed = self.compare(self.size(), other) > 0
return self
def is_size_greater_than_or_equal_to(self, other):
if not self.passed:
return self
self.passed = self.compare(self.size(), other) >= 0
return self
def is_size_between(self, start_inclusive_boundary, end_inclusive_boundary):
if not self.passed:
return self
self.passed = self.compare(self.size(), start_inclusive_boundary) >= 0 and self.compare(self.size(), end_inclusive_boundary) <= 0
return self
def is_size_strictly_between(self, start_exclusive_boundary, end_exclusive_boundary):
if not self.passed:
return self
self.passed = self.compare(self.size(), start_exclusive_boundary) > 0 and self.compare(self.size(), end_exclusive_boundary) < 0
return self
def is_size_start_inclusive_between(self, start_inclusive_boundary, end_exclusive_boundary):
if not self.passed:
return self
self.passed = self.compare(self.size(), start_inclusive_boundary) >= 0 and self.compare(self.size(), end_exclusive_boundary) < 0
return self
def is_size_end_inclusive_between(self, start_exclusive_boundary, end_inclusive_boundary):
if not self.passed:
return self
self.passed = self.compare(self.size(), start_exclusive_boundary) > 0 and self.compare(self.size(), end_inclusive_boundary) <= 0
return self
def compare(self, left, right):
return left - right
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 28.741279 | 137 | 0.654395 |

| signal | `*_quality_signal` value | flag |
|---|---|---|
| qsc_code_num_words | 1199 | 0 |
| qsc_code_num_chars | 9887 | 0 |
| qsc_code_mean_word_length | 5.146789 | 0 |
| qsc_code_frac_words_unique | 0.075897 | null |
| qsc_code_frac_chars_top_2grams | 0.092368 | 0 |
| qsc_code_frac_chars_top_3grams | 0.070005 | 0 |
| qsc_code_frac_chars_top_4grams | 0.087506 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.82515 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.784638 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.747043 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.732134 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.693081 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.666991 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.003745 | 0 |
| qsc_code_frac_chars_whitespace | 0.27076 | 0 |
| qsc_code_size_file_byte | 9887 | 0 |
| qsc_code_num_lines | 343 | 0 |
| qsc_code_num_chars_line_max | 138 | 0 |
| qsc_code_num_chars_line_mean | 28.825073 | 0 |
| qsc_code_frac_chars_alphabet | 0.85215 | 0 |
| qsc_code_frac_chars_comments | 0.004248 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.721805 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.003659 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.041353 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.236842 | 1 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.323308 | 1 |
| qsc_codepython_frac_lines_import | 0.003759 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0.007519 | 0 |
| qsc_codepython_score_lines_no_logic | 0.492481 | 0 |
| qsc_codepython_frac_lines_print | 0.007519 | 0 |

| effective | hits |
|---|---|
| 0 | 8 |
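The module above is all abstract plumbing: every fluent check sets `self.passed` and returns `self`, and comparisons are delegated to an abstract `compare`. A minimal concrete subclass showing the intended chaining, assuming the file is importable as `assertpy.base` (`IntAssert` is hypothetical, not part of the module):

```python
from assertpy.base import AbstractComparableAssert

class IntAssert(AbstractComparableAssert):
    def compare(self, left, right):
        # Same convention as the module: negative / zero / positive.
        return left - right

# Checks short-circuit: once `passed` is False, later calls are no-ops.
assert IntAssert(5).is_greater_than(1).is_less_than(10).get_result()
assert not IntAssert(5).is_between(7, 9).get_result()
```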
| field | value |
|---|---|
| hexsha | 2998c362fe6a57b38f89b7b6efc9675a169faf72 |
| size | 3066 |
| ext | py |
| lang | Python |
| max_stars_repo_path | PLC/PlcValidator.py |
| max_stars_repo_name | dangxuanvuong98/pineapples_harvester |
| max_stars_repo_head_hexsha | e53f5a681a2f128383215b4b1fd85a5f728bb676 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | PLC/PlcValidator.py |
| max_issues_repo_name | dangxuanvuong98/pineapples_harvester |
| max_issues_repo_head_hexsha | e53f5a681a2f128383215b4b1fd85a5f728bb676 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 7 |
| max_issues_repo_issues_event_min_datetime | 2020-09-25T22:35:33.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-03-12T00:20:31.000Z |
| max_forks_repo_path | PLC/PlcValidator.py |
| max_forks_repo_name | dangxuanvuong98/pineapples_harvester |
| max_forks_repo_head_hexsha | e53f5a681a2f128383215b4b1fd85a5f728bb676 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
'''
This file contains two functions that validate coordinates before they are sent over serial.
'''
# check whether the coordinate is valid
def plc1CoordinateValidator(raw_x, raw_y, raw_z):
    # y = int(raw_y) - 59 - 21; -59 (outer edge) is the distance from the camera to the frame, 21 from the frame to the y-axis cylinder shaft
    # 231 is the distance between the two inner edges, 59 from the camera to the inner edge, 23 from the inner edge to the arm
    y = 275 - int(raw_y)  # camera X axis; convert from camera coordinates to PLC1 frame coordinates  # 80
    # x = 100 - 20; 100 is half the harvesting span (inner edge), 20 from the cylinder body to the frame along the x axis
    # 14 from the arm to the inner edge
    x = 87 + int(raw_x)  # camera Y axis; convert from camera coordinates to PLC1 frame coordinates  # 184
    if y < 0 and abs(y) <= 5:
        y = 0
    if y > 180 and y < 210:  # clamp to the upper Y-axis limit
        y = 185
    if y > 43 and y < 58:  # clamp to lower Y-axis limit 1
        y = 51
    if y >= 58 and y < 65:  # clamp to lower Y-axis limit 2
        y = 55
    if x <= 10:  # clamp to the lower X-axis limit
        x = 3
    if y > 170 and x < 30:  # clamp the outermost fruit of row 1 (closest to the camera)
        y = 185
        x = 5
    if y > 145 and y <= 166 and x <= 18:  # limit for the outermost fruit of row 2
        y = 145
        x = 5
    if int(raw_z) < 70:
        z = 3
    if int(raw_z) >= 70 and int(raw_z) <= 80:
        z = 4
    if int(raw_z) > 80:
        z = 5
    if 51 <= y <= 185 and 0 <= x <= 87:
        # inside the cutting range: return the coordinate
        return {'x': x, 'y': y, 'z': z}
    # outside the cutting range: return nothing
    return None
# check whether the coordinate is valid
def plc2CoordinateValidator(raw_x, raw_y, raw_z):
    # y = int(raw_y) - 59 - 21; -59 (outer edge) is the distance from the camera to the frame, 21 from the frame to the y-axis cylinder shaft
    # 231 is the distance between the two inner edges, 59 from the camera to the inner edge, 23 from the inner edge to the arm
    y = 275 - int(raw_y)  # camera X axis; convert from camera coordinates to PLC1 frame coordinates  # 80
    # x = 100 - 20; 100 is half the harvesting span (inner edge), 20 from the cylinder body to the frame along the x axis
    # 14 from the arm to the inner edge
    x = 87 - int(raw_x)  # camera Y axis; convert from camera coordinates to PLC1 frame coordinates  # 184
    if y < 0 and abs(y) <= 5:
        y = 0
    if y > 180 and y < 210:  # clamp to the upper Y-axis limit
        y = 185
    if y > 43 and y < 58:  # clamp to lower Y-axis limit 1
        y = 51
    if y >= 58 and y < 65:  # clamp to lower Y-axis limit 2
        y = 55
    if x <= 10:  # clamp to the lower X-axis limit
        x = 3
    if y > 170 and x < 30:  # clamp the outermost fruit of row 1 (closest to the camera)
        y = 185
        x = 5
    if y > 145 and y <= 166 and x <= 18:  # limit for the outermost fruit of row 2
        y = 145
        x = 5
    if int(raw_z) < 70:
        z = 3
    if int(raw_z) >= 70 and int(raw_z) <= 80:
        z = 4
    if int(raw_z) > 80:
        z = 5
    if 51 <= y <= 185 and 0 <= x <= 75:
        # inside the cutting range: return the coordinate
        return {'x': x, 'y': y, 'z': z}
    # outside the cutting range: return nothing
    return None
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 41.432432 | 118 | 0.563601 |

| signal | `*_quality_signal` value | flag |
|---|---|---|
| qsc_code_num_words | 606 | 0 |
| qsc_code_num_chars | 3066 | 0 |
| qsc_code_mean_word_length | 2.818482 | 0 |
| qsc_code_frac_words_unique | 0.191419 | null |
| qsc_code_frac_chars_top_2grams | 0.04918 | 0 |
| qsc_code_frac_chars_top_3grams | 0.058548 | 0 |
| qsc_code_frac_chars_top_4grams | 0.031616 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.943794 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.943794 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.943794 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.943794 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.943794 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.912178 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.111723 | 0 |
| qsc_code_frac_chars_whitespace | 0.348989 | 0 |
| qsc_code_size_file_byte | 3066 | 0 |
| qsc_code_num_lines | 74 | 0 |
| qsc_code_num_chars_line_max | 119 | 0 |
| qsc_code_num_chars_line_mean | 41.432432 | 0 |
| qsc_code_frac_chars_alphabet | 0.743988 | 0 |
| qsc_code_frac_chars_comments | 0.516634 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.892857 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.004172 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.035714 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.107143 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| effective | hits |
|---|---|
| 0 | 7 |
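The two validators differ only in the X mapping (`87 + int(raw_x)` versus `87 - int(raw_x)`) and the X upper bound (87 versus 75). A quick sketch of the contract, assuming the module is importable as `PLC.PlcValidator`:

```python
from PLC.PlcValidator import plc1CoordinateValidator

# In range: camera reading (0, 100, 75) maps to y = 275 - 100 = 175,
# x = 87 + 0 = 87, z = 4 (since 70 <= raw_z <= 80).
print(plc1CoordinateValidator(0, 100, 75))   # {'x': 87, 'y': 175, 'z': 4}

# Out of range: y = 275 - 250 = 25 falls below the cutting window.
print(plc1CoordinateValidator(10, 250, 60))  # None
```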
| field | value |
|---|---|
| hexsha | 29b1c2c2bf2d0ade833b546149dfe658e968ef84 |
| size | 3217 |
| ext | py |
| lang | Python |
| max_stars_repo_path | src/genie/libs/parser/iosxe/tests/ShowClnsIsNeighborsDetail/cli/equal/golden_output_3_expected.py |
| max_stars_repo_name | balmasea/genieparser |
| max_stars_repo_head_hexsha | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 204 |
| max_stars_repo_stars_event_min_datetime | 2018-06-27T00:55:27.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-06T21:12:18.000Z |
| max_issues_repo_path | src/genie/libs/parser/iosxe/tests/ShowClnsIsNeighborsDetail/cli/equal/golden_output_3_expected.py |
| max_issues_repo_name | balmasea/genieparser |
| max_issues_repo_head_hexsha | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 468 |
| max_issues_repo_issues_event_min_datetime | 2018-06-19T00:33:18.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-03-31T23:23:35.000Z |
| max_forks_repo_path | src/genie/libs/parser/iosxe/tests/ShowClnsIsNeighborsDetail/cli/equal/golden_output_3_expected.py |
| max_forks_repo_name | balmasea/genieparser |
| max_forks_repo_head_hexsha | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 309 |
| max_forks_repo_forks_event_min_datetime | 2019-01-16T20:21:07.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-30T12:56:41.000Z |

content:

```python
expected_output = {
"tag": {
"test": {
"system_id": {
"R2_xr": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.01",
"format": "Phase V",
"interface": "GigabitEthernet2.115",
"ip_address": ["10.12.115.2*"],
"ipv6_address": ["FE80::F816:3EFF:FE67:2452"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
"R3_nx": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.02",
"format": "Phase V",
"interface": "GigabitEthernet3.115",
"ip_address": ["10.13.115.3*"],
"ipv6_address": ["FE80::5C01:FF:FE02:7"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
}
},
"test1": {
"system_id": {
"2222.22ff.4444": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "2222.22ff.4444.01",
"format": "Phase V",
"interface": "GigabitEthernet2.415",
"ip_address": ["10.12.115.2*"],
"ipv6_address": ["FE80::F816:3EFF:FE67:2452"],
"nsf": "capable",
"priority": 128,
"state": "init",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
"R3_nx": {
"type": {
"L1L2": {
"area_address": ["49.0001"],
"circuit_id": "R1_xe.02",
"format": "Phase V",
"interface": "GigabitEthernet3.415",
"ip_address": ["10.13.115.3*"],
"ipv6_address": ["FE80::5C01:FF:FE02:7"],
"nsf": "capable",
"priority": 64,
"state": "up",
"topology": ["ipv4", "ipv6"],
"uptime": "3d04h",
}
}
},
}
},
}
}
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 39.716049 | 74 | 0.254896 |

| signal | `*_quality_signal` value | flag |
|---|---|---|
| qsc_code_num_words | 183 | 0 |
| qsc_code_num_chars | 3217 | 0 |
| qsc_code_mean_word_length | 4.344262 | 0 |
| qsc_code_frac_words_unique | 0.333333 | null |
| qsc_code_frac_chars_top_2grams | 0.040252 | 0 |
| qsc_code_frac_chars_top_3grams | 0.060377 | 0 |
| qsc_code_frac_chars_top_4grams | 0.095597 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.881761 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.881761 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.783648 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.783648 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.740881 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.740881 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.148499 | 0 |
| qsc_code_frac_chars_whitespace | 0.606466 | 1 |
| qsc_code_size_file_byte | 3217 | 0 |
| qsc_code_num_lines | 80 | 0 |
| qsc_code_num_chars_line_max | 75 | 0 |
| qsc_code_num_chars_line_mean | 40.2125 | 0 |
| qsc_code_frac_chars_alphabet | 0.479463 | 1 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.6 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.264843 | 0 |
| qsc_code_frac_chars_long_word_length | 0.015542 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| effective | hits |
|---|---|
| 0 | 8 |
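Golden files like the one above hold the dict a parser run is expected to produce; the surrounding test harness compares parsed output against `expected_output`. Navigating the structure (a sketch; the flat import path is an assumption):

```python
from golden_output_3_expected import expected_output

# All values below come from the golden data itself.
l1l2 = expected_output["tag"]["test"]["system_id"]["R2_xr"]["type"]["L1L2"]
assert l1l2["priority"] == 64
assert l1l2["state"] == "up"
assert l1l2["topology"] == ["ipv4", "ipv6"]
```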
| field | value |
|---|---|
| hexsha | 29f0323a54f8e96513e1c1019c896d7518a8c1da |
| size | 194 |
| ext | py |
| lang | Python |
| max_stars_repo_path | ailabtools/keras/__init__.py |
| max_stars_repo_name | RyanDam/ailabtools |
| max_stars_repo_head_hexsha | 5693a7e59b3c6ff94d99b9aba7bde7239c665a45 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | ailabtools/keras/__init__.py |
| max_issues_repo_name | RyanDam/ailabtools |
| max_issues_repo_head_hexsha | 5693a7e59b3c6ff94d99b9aba7bde7239c665a45 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | ailabtools/keras/__init__.py |
| max_forks_repo_name | RyanDam/ailabtools |
| max_forks_repo_head_hexsha | 5693a7e59b3c6ff94d99b9aba7bde7239c665a45 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from .pairgenerator import load_img_func
from .pairgenerator import PairDataGenerator
from .classify_trainer import train_classifier, train_zaco_classifier
from .tflite_model import TFLiteModel
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 38.8 | 69 | 0.886598 |

| signal | `*_quality_signal` value | flag |
|---|---|---|
| qsc_code_num_words | 24 | 1 |
| qsc_code_num_chars | 194 | 0 |
| qsc_code_mean_word_length | 6.875 | 0 |
| qsc_code_frac_words_unique | 0.625 | null |
| qsc_code_frac_chars_top_2grams | 0.206061 | 1 |
| qsc_code_frac_chars_top_3grams | 0.278788 | 1 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.087629 | 0 |
| qsc_code_size_file_byte | 194 | 0 |
| qsc_code_num_lines | 4 | 1 |
| qsc_code_num_chars_line_max | 70 | 0 |
| qsc_code_num_chars_line_mean | 48.5 | 0 |
| qsc_code_frac_chars_alphabet | 0.932203 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| effective | hits |
|---|---|
| 0 | 7 |
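The file above is a pure re-export shim, so downstream code imports through the package rather than the submodules. Sketch, assuming `ailabtools` is installed:

```python
from ailabtools.keras import PairDataGenerator, TFLiteModel
```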
| field | value |
|---|---|
| hexsha | 4b17affa6d45e50ea3fc084a30dd5de20288ed2d |
| size | 117314 |
| ext | py |
| lang | Python |
| max_stars_repo_path | swagger_client/apis/booking_api.py |
| max_stars_repo_name | scubawhere/scubawhere-api-python-client |
| max_stars_repo_head_hexsha | 9f8578e251492c7667f785df7b7c9d66e71f5c8e |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | swagger_client/apis/booking_api.py |
| max_issues_repo_name | scubawhere/scubawhere-api-python-client |
| max_issues_repo_head_hexsha | 9f8578e251492c7667f785df7b7c9d66e71f5c8e |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | swagger_client/apis/booking_api.py |
| max_forks_repo_name | scubawhere/scubawhere-api-python-client |
| max_forks_repo_head_hexsha | 9f8578e251492c7667f785df7b7c9d66e71f5c8e |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
# coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: bryan@scubawhere.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class BookingApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def add_booking_detail(self, booking_id, customer_id, **kwargs):
"""
Add a package / course / ticket with its session to the booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_booking_detail(booking_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int customer_id: (required)
:param int ticket_id:
:param int session_id:
:param int boatroom_id:
:param int training_session_id:
:param bool temporary:
:param int package_id:
:param int packagefacade_id:
:param int course_id:
:return: InlineResponse20010
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_booking_detail_with_http_info(booking_id, customer_id, **kwargs)
else:
(data) = self.add_booking_detail_with_http_info(booking_id, customer_id, **kwargs)
return data
def add_booking_detail_with_http_info(self, booking_id, customer_id, **kwargs):
"""
Add a package / course / ticket with its session to the booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_booking_detail_with_http_info(booking_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int customer_id: (required)
:param int ticket_id:
:param int session_id:
:param int boatroom_id:
:param int training_session_id:
:param bool temporary:
:param int package_id:
:param int packagefacade_id:
:param int course_id:
:return: InlineResponse20010
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'customer_id', 'ticket_id', 'session_id', 'boatroom_id', 'training_session_id', 'temporary', 'package_id', 'packagefacade_id', 'course_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_booking_detail" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `add_booking_detail`")
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params) or (params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `add_booking_detail`")
resource_path = '/booking/add-detail'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'customer_id' in params:
query_params['customer_id'] = params['customer_id']
if 'ticket_id' in params:
query_params['ticket_id'] = params['ticket_id']
if 'session_id' in params:
query_params['session_id'] = params['session_id']
if 'boatroom_id' in params:
query_params['boatroom_id'] = params['boatroom_id']
if 'training_session_id' in params:
query_params['training_session_id'] = params['training_session_id']
if 'temporary' in params:
query_params['temporary'] = params['temporary']
if 'package_id' in params:
query_params['package_id'] = params['package_id']
if 'packagefacade_id' in params:
query_params['packagefacade_id'] = params['packagefacade_id']
if 'course_id' in params:
query_params['course_id'] = params['course_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20010',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def attach_accommodation(self, booking_id, accommodation_id, customer_id, **kwargs):
"""
Attach an accommodation booking to a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_accommodation(booking_id, accommodation_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int accommodation_id: (required)
:param int customer_id: (required)
:param date start:
:param date end:
:return: InlineResponse2008
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.attach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, **kwargs)
else:
(data) = self.attach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, **kwargs)
return data
def attach_accommodation_with_http_info(self, booking_id, accommodation_id, customer_id, **kwargs):
"""
Attach an accommodation booking to a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int accommodation_id: (required)
:param int customer_id: (required)
:param date start:
:param date end:
:return: InlineResponse2008
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'accommodation_id', 'customer_id', 'start', 'end']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method attach_accommodation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `attach_accommodation`")
# verify the required parameter 'accommodation_id' is set
if ('accommodation_id' not in params) or (params['accommodation_id'] is None):
raise ValueError("Missing the required parameter `accommodation_id` when calling `attach_accommodation`")
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params) or (params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `attach_accommodation`")
resource_path = '/booking/add-accommodation'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'accommodation_id' in params:
query_params['accommodation_id'] = params['accommodation_id']
if 'customer_id' in params:
query_params['customer_id'] = params['customer_id']
if 'start' in params:
query_params['start'] = params['start']
if 'end' in params:
query_params['end'] = params['end']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2008',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def attach_addon(self, booking_id, bookingdetail_id, addon_id, **kwargs):
"""
Attach an addon to a trip of a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_addon(booking_id, bookingdetail_id, addon_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int bookingdetail_id: (required)
:param int addon_id: (required)
:param int quantity:
:param int packagefacade_id:
:return: InlineResponse2009
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.attach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, **kwargs)
else:
(data) = self.attach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, **kwargs)
return data
def attach_addon_with_http_info(self, booking_id, bookingdetail_id, addon_id, **kwargs):
"""
Attach an addon to a trip of a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int bookingdetail_id: (required)
:param int addon_id: (required)
:param int quantity:
:param int packagefacade_id:
:return: InlineResponse2009
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'bookingdetail_id', 'addon_id', 'quantity', 'packagefacade_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method attach_addon" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `attach_addon`")
# verify the required parameter 'bookingdetail_id' is set
if ('bookingdetail_id' not in params) or (params['bookingdetail_id'] is None):
raise ValueError("Missing the required parameter `bookingdetail_id` when calling `attach_addon`")
# verify the required parameter 'addon_id' is set
if ('addon_id' not in params) or (params['addon_id'] is None):
raise ValueError("Missing the required parameter `addon_id` when calling `attach_addon`")
resource_path = '/booking/add-addon'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'bookingdetail_id' in params:
query_params['bookingdetail_id'] = params['bookingdetail_id']
if 'addon_id' in params:
query_params['addon_id'] = params['addon_id']
if 'quantity' in params:
query_params['quantity'] = params['quantity']
if 'packagefacade_id' in params:
query_params['packagefacade_id'] = params['packagefacade_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2009',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def attach_pickup(self, booking_id, location, date, time, **kwargs):
"""
Attach a pickup location for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_pickup(booking_id, location, date, time, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param str location: (required)
:param date date: (required)
:param str time: (required)
:return: InlineResponse20011
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.attach_pickup_with_http_info(booking_id, location, date, time, **kwargs)
else:
(data) = self.attach_pickup_with_http_info(booking_id, location, date, time, **kwargs)
return data
def attach_pickup_with_http_info(self, booking_id, location, date, time, **kwargs):
"""
Attach a pickup location for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_pickup_with_http_info(booking_id, location, date, time, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param str location: (required)
:param date date: (required)
:param str time: (required)
:return: InlineResponse20011
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'location', 'date', 'time']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method attach_pickup" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `attach_pickup`")
# verify the required parameter 'location' is set
if ('location' not in params) or (params['location'] is None):
raise ValueError("Missing the required parameter `location` when calling `attach_pickup`")
# verify the required parameter 'date' is set
if ('date' not in params) or (params['date'] is None):
raise ValueError("Missing the required parameter `date` when calling `attach_pickup`")
# verify the required parameter 'time' is set
if ('time' not in params) or (params['time'] is None):
raise ValueError("Missing the required parameter `time` when calling `attach_pickup`")
resource_path = '/booking/add-pickup'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'location' in params:
query_params['location'] = params['location']
if 'date' in params:
query_params['date'] = params['date']
if 'time' in params:
query_params['time'] = params['time']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20011',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def cancel_booking(self, booking_id, **kwargs):
"""
Cancel a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cancel_booking(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cancel_booking_with_http_info(booking_id, **kwargs)
else:
(data) = self.cancel_booking_with_http_info(booking_id, **kwargs)
return data
def cancel_booking_with_http_info(self, booking_id, **kwargs):
"""
Cancel a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cancel_booking_with_http_info(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cancel_booking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `cancel_booking`")
resource_path = '/booking/cancel'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2003',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def confirm_booking(self, booking_id, **kwargs):
"""
Confirm a booking and all of its sessions and notify the lead customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.confirm_booking(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:return: InlineResponse20012
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.confirm_booking_with_http_info(booking_id, **kwargs)
else:
(data) = self.confirm_booking_with_http_info(booking_id, **kwargs)
return data
def confirm_booking_with_http_info(self, booking_id, **kwargs):
"""
Confirm a booking and all of its sessions and notify the lead customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.confirm_booking_with_http_info(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:return: InlineResponse20012
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method confirm_booking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `confirm_booking`")
resource_path = '/booking/confirm'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20012',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def delete_booking(self, id, **kwargs):
"""
Delete a booking by ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_booking(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_booking_with_http_info(id, **kwargs)
else:
(data) = self.delete_booking_with_http_info(id, **kwargs)
return data
def delete_booking_with_http_info(self, id, **kwargs):
"""
Delete a booking by ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_booking_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_booking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_booking`")
resource_path = '/booking/delete'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'id' in params:
query_params['id'] = params['id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2003',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def dettach_accommodation(self, booking_id, accommodation_id, customer_id, **kwargs):
"""
Dettach an accommodation booking to a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.dettach_accommodation(booking_id, accommodation_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int accommodation_id: (required)
:param int customer_id: (required)
:param date start:
:return: InlineResponse20017
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.dettach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, **kwargs)
else:
(data) = self.dettach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, **kwargs)
return data
def dettach_accommodation_with_http_info(self, booking_id, accommodation_id, customer_id, **kwargs):
"""
Dettach an accommodation booking to a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.dettach_accommodation_with_http_info(booking_id, accommodation_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int accommodation_id: (required)
:param int customer_id: (required)
:param date start:
:return: InlineResponse20017
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'accommodation_id', 'customer_id', 'start']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method dettach_accommodation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `dettach_accommodation`")
# verify the required parameter 'accommodation_id' is set
if ('accommodation_id' not in params) or (params['accommodation_id'] is None):
raise ValueError("Missing the required parameter `accommodation_id` when calling `dettach_accommodation`")
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params) or (params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `dettach_accommodation`")
resource_path = '/booking/remove-accommodation'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'accommodation_id' in params:
query_params['accommodation_id'] = params['accommodation_id']
if 'customer_id' in params:
query_params['customer_id'] = params['customer_id']
if 'start' in params:
query_params['start'] = params['start']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20017',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def dettach_addon(self, booking_id, bookingdetail_id, addon_id, **kwargs):
"""
Dettach an addon to a trip of a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.dettach_addon(booking_id, bookingdetail_id, addon_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int bookingdetail_id: (required)
:param int addon_id: (required)
:param int packagefacade_id:
:return: InlineResponse20017
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.dettach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, **kwargs)
else:
(data) = self.dettach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, **kwargs)
return data
def dettach_addon_with_http_info(self, booking_id, bookingdetail_id, addon_id, **kwargs):
"""
Detach an addon from a trip of a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.dettach_addon_with_http_info(booking_id, bookingdetail_id, addon_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int bookingdetail_id: (required)
:param int addon_id: (required)
:param int packagefacade_id:
:return: InlineResponse20017
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'bookingdetail_id', 'addon_id', 'packagefacade_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method dettach_addon" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `dettach_addon`")
# verify the required parameter 'bookingdetail_id' is set
if ('bookingdetail_id' not in params) or (params['bookingdetail_id'] is None):
raise ValueError("Missing the required parameter `bookingdetail_id` when calling `dettach_addon`")
# verify the required parameter 'addon_id' is set
if ('addon_id' not in params) or (params['addon_id'] is None):
raise ValueError("Missing the required parameter `addon_id` when calling `dettach_addon`")
resource_path = '/booking/remove-addon'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'bookingdetail_id' in params:
query_params['bookingdetail_id'] = params['bookingdetail_id']
if 'addon_id' in params:
query_params['addon_id'] = params['addon_id']
if 'packagefacade_id' in params:
query_params['packagefacade_id'] = params['packagefacade_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20017',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
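# Usage sketch (hypothetical IDs; `api` as above). The optional
# `packagefacade_id` is only sent when supplied, since query parameters are
# added conditionally:
#
#   result = api.dettach_addon(booking_id=42, bookingdetail_id=11, addon_id=5,
#                              packagefacade_id=2)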
def dettach_pickup(self, booking_id, **kwargs):
"""
Detach a pickup location from a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.dettach_pickup(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int id:
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.dettach_pickup_with_http_info(booking_id, **kwargs)
else:
(data) = self.dettach_pickup_with_http_info(booking_id, **kwargs)
return data
def dettach_pickup_with_http_info(self, booking_id, **kwargs):
"""
Detach a pickup location from a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.dettach_pickup_with_http_info(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int id:
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method dettach_pickup" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `dettach_pickup`")
resource_path = '/booking/remove-pickup'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'id' in params:
query_params['id'] = params['id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2003',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
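# Usage sketch (hypothetical IDs; `api` as above). Passing a keyword that is
# not in `all_params` raises TypeError before any HTTP request is made:
#
#   result = api.dettach_pickup(booking_id=42, id=3)
#   # api.dettach_pickup(booking_id=42, pickup=3)  -> TypeError (unexpected kwarg)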
def edit_booking_info(self, **kwargs):
"""
Edit the information related to a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.edit_booking_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id:
:param float discount:
:param str comment:
:return: InlineResponse20014
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.edit_booking_info_with_http_info(**kwargs)
else:
(data) = self.edit_booking_info_with_http_info(**kwargs)
return data
def edit_booking_info_with_http_info(self, **kwargs):
"""
Edit the information related to a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.edit_booking_info_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id:
:param float discount:
:param str comment:
:return: InlineResponse20014
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'discount', 'comment']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_booking_info" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/edit-info'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'discount' in params:
query_params['discount'] = params['discount']
if 'comment' in params:
query_params['comment'] = params['comment']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20014',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
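# Usage sketch (hypothetical values; `api` as above). All three parameters are
# optional and are sent as query parameters on the POST:
#
#   result = api.edit_booking_info(booking_id=42, discount=10.0,
#                                  comment='Repeat customer discount')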
def filter_bookings(self, **kwargs):
"""
Get all bookings matching a filter
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.filter_bookings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str reference:
:param date date:
:param str lastname:
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.filter_bookings_with_http_info(**kwargs)
else:
(data) = self.filter_bookings_with_http_info(**kwargs)
return data
def filter_bookings_with_http_info(self, **kwargs):
"""
Get all bookings matching a filter
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.filter_bookings_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str reference:
:param date date:
:param str lastname:
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reference', 'date', 'lastname']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method filter_bookings" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/filter'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'reference' in params:
query_params['reference'] = params['reference']
if 'date' in params:
query_params['date'] = params['date']
if 'lastname' in params:
query_params['lastname'] = params['lastname']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20013',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
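# Usage sketch (hypothetical values; `api` as above). The filters combine as
# query parameters on a GET to /booking/filter; how the date is serialized is
# left to the underlying api_client:
#
#   import datetime
#   result = api.filter_bookings(lastname='Smith',
#                                date=datetime.date(2017, 6, 1))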
def get_all_bookings(self, **kwargs):
"""
Retrieve all bookings
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_bookings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Booking]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_bookings_with_http_info(**kwargs)
else:
(data) = self.get_all_bookings_with_http_info(**kwargs)
return data
def get_all_bookings_with_http_info(self, **kwargs):
"""
Retrieve all bookings
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_bookings_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Booking]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_bookings" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/all'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Booking]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
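# Usage sketch (`api` as above). The plain method returns only the data; the
# *_with_http_info variant is presumed (as in typical swagger-codegen clients)
# to also carry status and headers when `_return_http_data_only` is not set:
#
#   bookings = api.get_all_bookings()            # -> list[Booking]
#   info = api.get_all_bookings_with_http_info()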
def get_all_with_trashed_bookings(self, **kwargs):
"""
Retrieve all bookings, including soft-deleted (trashed) ones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_with_trashed_bookings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Booking]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_with_trashed_bookings_with_http_info(**kwargs)
else:
(data) = self.get_all_with_trashed_bookings_with_http_info(**kwargs)
return data
def get_all_with_trashed_bookings_with_http_info(self, **kwargs):
"""
Retrieve all bookings, including soft-deleted (trashed) ones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_with_trashed_bookings_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Booking]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_with_trashed_bookings" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/all-with-trashed'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Booking]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_booking(self, id, **kwargs):
"""
Retrieve a booking by ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_booking(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: InlineResponse2007
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_booking_with_http_info(id, **kwargs)
else:
(data) = self.get_booking_with_http_info(id, **kwargs)
return data
def get_booking_with_http_info(self, id, **kwargs):
"""
Retrieve a booking by ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_booking_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: InlineResponse2007
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_booking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_booking`")
resource_path = '/booking'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'id' in params:
query_params['id'] = params['id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2007',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
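# Usage sketch (hypothetical ID; `api` as above). Required parameters are
# validated locally, so a missing or None `id` raises ValueError without any
# network call:
#
#   booking = api.get_booking(42)
#   # api.get_booking(None)  -> ValueError: Missing the required parameter `id` ...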
def get_customer_bookings(self, customer_id, **kwargs):
"""
Get all bookings for a customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_customer_bookings(customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int customer_id: (required)
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_customer_bookings_with_http_info(customer_id, **kwargs)
else:
(data) = self.get_customer_bookings_with_http_info(customer_id, **kwargs)
return data
def get_customer_bookings_with_http_info(self, customer_id, **kwargs):
"""
Get all bookings for a customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_customer_bookings_with_http_info(customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int customer_id: (required)
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['customer_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_customer_bookings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params) or (params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_bookings`")
resource_path = '/booking/customer'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'customer_id' in params:
query_params['customer_id'] = params['customer_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20013',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_payments(self, **kwargs):
"""
Retrieve all payments made for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_payments(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id:
:return: InlineResponse20015
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_payments_with_http_info(**kwargs)
else:
(data) = self.get_payments_with_http_info(**kwargs)
return data
def get_payments_with_http_info(self, **kwargs):
"""
Retrieve all payments made for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_payments_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id:
:return: InlineResponse20015
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payments" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/payments'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20015',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_refunds(self, **kwargs):
"""
Retrieve all refunds for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_refunds(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id:
:return: InlineResponse20016
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_refunds_with_http_info(**kwargs)
else:
(data) = self.get_refunds_with_http_info(**kwargs)
return data
def get_refunds_with_http_info(self, **kwargs):
"""
Retrieve all refunds for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_refunds_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id:
:return: InlineResponse20016
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_refunds" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/refunds'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20016',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
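# Usage sketch (hypothetical ID; `api` as above): fetching a booking's
# payments (InlineResponse20015) alongside its refunds (InlineResponse20016),
# e.g. to reconcile the two:
#
#   payments = api.get_payments(booking_id=42)
#   refunds = api.get_refunds(booking_id=42)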
def get_todays_bookings(self, **kwargs):
"""
Get all bookings made today
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_todays_bookings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_todays_bookings_with_http_info(**kwargs)
else:
(data) = self.get_todays_bookings_with_http_info(**kwargs)
return data
def get_todays_bookings_with_http_info(self, **kwargs):
"""
Get all bookings made today
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_todays_bookings_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_todays_bookings" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/today'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20013',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_tommorows_bookings(self, **kwargs):
"""
Get all bookings for tomorrow
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_tommorows_bookings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_tommorows_bookings_with_http_info(**kwargs)
else:
(data) = self.get_tommorows_bookings_with_http_info(**kwargs)
return data
def get_tommorows_bookings_with_http_info(self, **kwargs):
"""
Get all bookings for tomorrow
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_tommorows_bookings_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: InlineResponse20013
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tommorows_bookings" % key
)
params[key] = val
del params['kwargs']
resource_path = '/booking/tommorow'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20013',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def init_booking(self, source, **kwargs):
"""
Create a new empty booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.init_booking(source, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str source: (required)
:param int agent_id:
:param str agent_reference:
:return: InlineResponse201
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.init_booking_with_http_info(source, **kwargs)
else:
(data) = self.init_booking_with_http_info(source, **kwargs)
return data
def init_booking_with_http_info(self, source, **kwargs):
"""
Create a new empty booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.init_booking_with_http_info(source, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str source: (required)
:param int agent_id:
:param str agent_reference:
:return: InlineResponse201
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['source', 'agent_id', 'agent_reference']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method init_booking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'source' is set
if ('source' not in params) or (params['source'] is None):
raise ValueError("Missing the required parameter `source` when calling `init_booking`")
resource_path = '/booking/init'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'source' in params:
query_params['source'] = params['source']
if 'agent_id' in params:
query_params['agent_id'] = params['agent_id']
if 'agent_reference' in params:
query_params['agent_reference'] = params['agent_reference']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse201',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
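# Usage sketch (hypothetical values; `api` as above). Only `source` is
# required; the agent details are optional query parameters:
#
#   created = api.init_booking('website', agent_id=9,
#                              agent_reference='AG-1234')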
def remove_booking_detail(self, booking_id, bookingdetail_id, **kwargs):
"""
Remove a detail from a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_booking_detail(booking_id, bookingdetail_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int bookingdetail_id: (required)
:return: InlineResponse20017
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.remove_booking_detail_with_http_info(booking_id, bookingdetail_id, **kwargs)
else:
(data) = self.remove_booking_detail_with_http_info(booking_id, bookingdetail_id, **kwargs)
return data
def remove_booking_detail_with_http_info(self, booking_id, bookingdetail_id, **kwargs):
"""
Remove a detail from a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_booking_detail_with_http_info(booking_id, bookingdetail_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int bookingdetail_id: (required)
:return: InlineResponse20017
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'bookingdetail_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_booking_detail" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `remove_booking_detail`")
# verify the required parameter 'bookingdetail_id' is set
if ('bookingdetail_id' not in params) or (params['bookingdetail_id'] is None):
raise ValueError("Missing the required parameter `bookingdetail_id` when calling `remove_booking_detail`")
resource_path = '/booking/remove-detail'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'bookingdetail_id' in params:
query_params['bookingdetail_id'] = params['bookingdetail_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20017',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def resend_confirmation(self, booking_id, **kwargs):
"""
Resend the confirmation email to the lead customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.resend_confirmation(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.resend_confirmation_with_http_info(booking_id, **kwargs)
else:
(data) = self.resend_confirmation_with_http_info(booking_id, **kwargs)
return data
def resend_confirmation_with_http_info(self, booking_id, **kwargs):
"""
Resend the confirmation email to the lead customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.resend_confirmation_with_http_info(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method resend_confirmation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `resend_confirmation`")
resource_path = '/booking/resend-confirmation'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2003',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def reserve_booking(self, booking_id, **kwargs):
"""
Reserve a booking and its sessions' capacity until a set date
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.reserve_booking(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param date reserved_until:
:return: InlineResponse20018
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.reserve_booking_with_http_info(booking_id, **kwargs)
else:
(data) = self.reserve_booking_with_http_info(booking_id, **kwargs)
return data
def reserve_booking_with_http_info(self, booking_id, **kwargs):
"""
Reserve a booking and its sessions' capacity until a set date
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.reserve_booking_with_http_info(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param date reserved_until:
:return: InlineResponse20018
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'reserved_until']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method reserve_booking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `reserve_booking`")
resource_path = '/booking/reserve'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'reserved_until' in params:
query_params['reserved_until'] = params['reserved_until']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20018',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
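# Usage sketch (hypothetical values; `api` as above). `reserved_until` is a
# date, so the sessions' capacity is held until then:
#
#   import datetime
#   hold = api.reserve_booking(booking_id=42,
#                              reserved_until=datetime.date(2017, 7, 15))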
def save_booking(self, booking_id, **kwargs):
"""
Save a booking as a quote and release all reserved session capacity
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.save_booking(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.save_booking_with_http_info(booking_id, **kwargs)
else:
(data) = self.save_booking_with_http_info(booking_id, **kwargs)
return data
def save_booking_with_http_info(self, booking_id, **kwargs):
"""
Save a booking as a quote and release all reserved session capacity
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.save_booking_with_http_info(booking_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_booking" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `save_booking`")
resource_path = '/booking/save'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2003',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
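# Usage sketch (hypothetical ID; `api` as above). Contrast with
# reserve_booking: saving as a quote releases the sessions' capacity instead
# of holding it until a date:
#
#   quote = api.save_booking(booking_id=42)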
def set_lead_customer(self, booking_id, customer_id, **kwargs):
"""
Set the lead customer for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_lead_customer(booking_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int customer_id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.set_lead_customer_with_http_info(booking_id, customer_id, **kwargs)
else:
(data) = self.set_lead_customer_with_http_info(booking_id, customer_id, **kwargs)
return data
def set_lead_customer_with_http_info(self, booking_id, customer_id, **kwargs):
"""
Set the lead customer for a booking
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_lead_customer_with_http_info(booking_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int booking_id: (required)
:param int customer_id: (required)
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['booking_id', 'customer_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_lead_customer" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'booking_id' is set
if ('booking_id' not in params) or (params['booking_id'] is None):
raise ValueError("Missing the required parameter `booking_id` when calling `set_lead_customer`")
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params) or (params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `set_lead_customer`")
resource_path = '/booking/set-lead'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'booking_id' in params:
query_params['booking_id'] = params['booking_id']
if 'customer_id' in params:
query_params['customer_id'] = params['customer_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2003',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
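# End-to-end sketch (hypothetical IDs; `api` as above) chaining several of the
# calls in this class -- in practice the booking_id would come from the
# init_booking response rather than being hard-coded:
#
#   created = api.init_booking('website')
#   api.set_lead_customer(booking_id=42, customer_id=3)
#   api.reserve_booking(booking_id=42)
#   api.resend_confirmation(booking_id=42)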
| 40.411299 | 227 | 0.564843 | 11,832 | 117,314 | 5.371704 | 0.024172 | 0.065452 | 0.029422 | 0.029453 | 0.948252 | 0.93856 | 0.933006 | 0.929088 | 0.913591 | 0.902089 | 0 | 0.004445 | 0.353786 | 117,314 | 2,902 | 228 | 40.425224 | 0.83394 | 0.321309 | 0 | 0.782577 | 0 | 0 | 0.175755 | 0.030992 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038799 | false | 0 | 0.005124 | 0 | 0.101757 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4b26ae191e082e55133e2d52c73c69a2e1e6de65 | 2,757 | py | Python | ckanext/metadata/logic/auth/get.py | SAEONData/ckanext-metadata | af1a137e5d924f05ea1835b81f36f808700d3aa7 | ["MIT"] | null | null | null | ckanext/metadata/logic/auth/get.py | SAEONData/ckanext-metadata | af1a137e5d924f05ea1835b81f36f808700d3aa7 | ["MIT"] | 76 | 2018-04-10T12:51:56.000Z | 2021-02-22T11:41:03.000Z | ckanext/metadata/logic/auth/get.py | SAEONData/ckanext-metadata | af1a137e5d924f05ea1835b81f36f808700d3aa7 | ["MIT"] | null | null | null |
# encoding: utf-8
import logging
log = logging.getLogger(__name__)
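# Each auth function below unconditionally grants access ({'success': True});
# CKAN invokes these to authorize the logic action of the same name.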
def metadata_standard_show(context, data_dict):
return {'success': True}
def metadata_schema_show(context, data_dict):
return {'success': True}
def infrastructure_show(context, data_dict):
return {'success': True}
def metadata_collection_show(context, data_dict):
return {'success': True}
def metadata_record_show(context, data_dict):
return {'success': True}
def metadata_standard_list(context, data_dict):
return {'success': True}
def metadata_schema_list(context, data_dict):
return {'success': True}
def metadata_schema_dependent_record_list(context, data_dict):
return {'success': True}
def infrastructure_list(context, data_dict):
return {'success': True}
def metadata_collection_list(context, data_dict):
return {'success': True}
def metadata_record_list(context, data_dict):
return {'success': True}
def metadata_record_validation_schema_list(context, data_dict):
return {'success': True}
def metadata_record_validation_activity_show(context, data_dict):
return {'success': True}
def metadata_validity_check(context, data_dict):
return {'success': True}
def metadata_record_workflow_rules_check(context, data_dict):
return {'success': True}
def metadata_record_workflow_activity_show(context, data_dict):
return {'success': True}
def metadata_record_workflow_annotation_show(context, data_dict):
return {'success': True}
def metadata_record_workflow_annotation_list(context, data_dict):
return {'success': True}
def metadata_record_workflow_augmented_show(context, data_dict):
return {'success': True}
def workflow_state_show(context, data_dict):
return {'success': True}
def workflow_state_list(context, data_dict):
return {'success': True}
def workflow_transition_show(context, data_dict):
return {'success': True}
def workflow_transition_list(context, data_dict):
return {'success': True}
def workflow_annotation_show(context, data_dict):
return {'success': True}
def workflow_annotation_list(context, data_dict):
return {'success': True}
def metadata_json_attr_map_show(context, data_dict):
return {'success': True}
def metadata_json_attr_map_list(context, data_dict):
return {'success': True}
def metadata_json_attr_map_apply(context, data_dict):
return {'success': True}
def metadata_record_attr_match(context, data_dict):
return {'success': True}
def metadata_record_exact_match(context, data_dict):
return {'success': True}
def metadata_standard_index_show(context, data_dict):
return {'success': True}
def metadata_record_index_show(context, data_dict):
return {'success': True}
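# Wiring sketch (hypothetical plugin class name; not part of this module): in
# a CKAN extension these auth functions are typically registered through the
# IAuthFunctions plugin interface so that check_access() resolves to them:
#
#   import ckan.plugins as plugins
#   from ckanext.metadata.logic.auth import get as auth_get
#
#   class MetadataFrameworkPlugin(plugins.SingletonPlugin):
#       plugins.implements(plugins.IAuthFunctions)
#
#       def get_auth_functions(self):
#           return {
#               'metadata_record_show': auth_get.metadata_record_show,
#               'metadata_record_list': auth_get.metadata_record_list,
#           }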
| 20.574627 | 65 | 0.750091 | 353 | 2,757 | 5.507082 | 0.116147 | 0.18107 | 0.246914 | 0.345679 | 0.942387 | 0.942387 | 0.942387 | 0.942387 | 0.884259 | 0.584362 | 0 | 0.000423 | 0.142909 | 2,757 | 133 | 66 | 20.729323 | 0.82226 | 0.005441 | 0 | 0.484848 | 0 | 0 | 0.081752 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.484848 | false | 0 | 0.015152 | 0.484848 | 0.984848 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 10 |
d99b7e985d79df0e3fc06e77403ea7bff3febbf6 | 20,611 | py | Python | src/old/processFeatureFile.py | shashankpr/sleep-classification | 302a0eb50ddf64ab51006a93a58a2a2fe9732a8c | ["MIT"] | 3 | 2019-03-31T04:20:58.000Z | 2021-12-30T19:26:59.000Z | src/old/processFeatureFile.py | shashankpr/sleep-classification | 302a0eb50ddf64ab51006a93a58a2a2fe9732a8c | ["MIT"] | 1 | 2021-02-03T16:52:54.000Z | 2021-02-03T16:52:54.000Z | src/old/processFeatureFile.py | shashankpr/sleep-classification | 302a0eb50ddf64ab51006a93a58a2a2fe9732a8c | ["MIT"] | 1 | 2017-12-06T13:07:22.000Z | 2017-12-06T13:07:22.000Z |
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.externals import joblib
from sklearn.pipeline import Pipeline
import pickle
# from sknn.mlp import Classifier, Layer
import pandas as pd
import numpy as np
import csv
import datetime as dt
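# This module aligns feature CSVs (a TIME column plus signal columns) with
# sleep-stage label CSVs (1=deep, 2=light, 3=REM, 4=wake): each label marks
# the start of an interval, and a feature epoch (a 30 s window centered on
# its timestamp) is kept only when it falls entirely inside one interval.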
def set_classifier(feature_files, label_files):
# read from csv instead
print "Reading training data..."
feature_list = []
label_list = []
time_list = []
processedDict = {}
for file_itr in range(0, len(feature_files)):
feature_file = feature_files[file_itr]
label_file = label_files[file_itr]
print "----------------------------"
print "Feature File:"
print feature_file
print "Label File:"
print label_file
print "----------------------------"
feature_list_file = []
label_list_file = []
label_time_list_file = []
# ff = open(feature_file, 'r')
ff = pd.read_csv(feature_file)
# print ff.info
# print ff['TIME']
# reader = csv.reader(ff)
for count, row in enumerate(ff['TIME']):
# print row
feature_row_dict = {}
try:
# print str(row[1])
timeObject = dt.datetime.strptime(str(row), '%Y-%m-%d %H:%M:%S.%f')
# timeObject = timeObject + dt.timedelta(hours=5, minutes=30)
except ValueError:
timeObject = dt.datetime.strptime(str(row), '%Y-%m-%d %H:%M:%S')
# timeObject = timeObject + dt.timedelta(hours=5, minutes=30)
feature_row_dict['TIME'] = timeObject
float_features = []
float_features.append(float(ff['HSIGNAL'][count]))
float_features.append(float(ff['RSIGNAL'][count]))
feature_row_dict['FEATURES'] = float_features
# print np.asarray(float_features).shape
feature_list_file.append(feature_row_dict)
print "----------------------------"
print "Number of Epochs for Dozee:"
print len(feature_list_file)
print "----------------------------"
# lf = open(label_file, 'r')
lf = pd.read_csv(label_file, header=None, names=['TIME', 'LABEL'])
# label_reader = csv.reader(lf)
# print lf.info
# print lf.ix[0]
for count, label_row in enumerate(lf['TIME']):
# print label_row
try:
labeltimeObject = dt.datetime.strptime(str(label_row), '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
labeltimeObject = dt.datetime.strptime(str(label_row), '%Y-%m-%d %H:%M:%S')
label_time_list_file.append(labeltimeObject)
label = lf['LABEL'][count]
# print label
label_list_file.append(label)
file_epoch_counter = 0
diff = -30
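# Match feature epochs against label intervals: an epoch (timestamp +/- 15 s)
# is accepted only when it lies entirely between two consecutive label
# timestamps; diff accumulates 30 s offsets used to rebuild epoch timestamps.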
for itr in range(0, len(label_time_list_file)):
if (itr != len(label_time_list_file) - 1):
label_time = label_time_list_file[itr]
label_end_time = label_time_list_file[itr + 1]
for dict in feature_list_file:
dict_time = dict['TIME']
epoch_start_time = dict_time - dt.timedelta(seconds=15)
epoch_end_time = dict_time + dt.timedelta(seconds=15)
if (epoch_start_time > label_time and epoch_end_time < label_end_time):
# if(int(label_list_file[itr]) != 4):
time_val = label_time + dt.timedelta(seconds=diff + 30)
feature_list.append(dict['FEATURES'])
label_list.append(int(label_list_file[itr]))
time_list.append(time_val)
file_epoch_counter = file_epoch_counter + 1
diff = diff + 30
else:
diff = -30
print "----------------------------"
print "Number of Epochs matched:"
print file_epoch_counter
print "----------------------------"
print "----------------------------"
print "Number of Total Epochs:"
print len(feature_list)
print "Number of Total Labels:"
print len(label_list)
print "----------------------------"
print "Number of Timestamps"
print len(time_list)
print "----------------------------"
# df = pd.DataFrame(processedDict)
# df.to_csv("processed.csv")
num_deep_epochs = label_list.count(1)
num_light_epochs = label_list.count(2)
num_rem_epochs = label_list.count(3)
num_wake_epochs = label_list.count(4)
print "----------------------------"
print "Number of Deep Epochs:"
print num_deep_epochs
print "Number of Light Labels:"
print num_light_epochs
print "Number of REM Epochs:"
print num_rem_epochs
print "Number of Wake Labels:"
print num_wake_epochs
print "----------------------------"
# X_train = np.array(feature_list)
# y_train = np.array(label_list)
X_train = feature_list
y_train = label_list
processedDict["TIME"] = time_list
processedDict["FEATURES"] = X_train
processedDict["LABELS"] = y_train
# df = pd.DataFrame(processedDict)
# df.to_csv("processed.csv")
return processedDict
def set_classifierTest(feature_files, label_files):
# read from csv instead
print "Reading test data..."
feature_list = []
label_list = []
time_list = []
processedDictTest = {}
for file_itr in range(0, len(feature_files)):
feature_file = feature_files[file_itr]
label_file = label_files[file_itr]
print "----------------------------"
print "Feature File:"
print feature_file
print "Label File:"
print label_file
print "----------------------------"
feature_list_file = []
label_list_file = []
label_time_list_file = []
ff = open(feature_file, 'r')
reader = csv.reader(ff)
# print reader[-1]
for row in reader:
feature_row_dict = {}
try:
timeObject = dt.datetime.strptime(str(row[0]), '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
timeObject = dt.datetime.strptime(str(row[0]), '%Y-%m-%d %H:%M:%S')
feature_row_dict['TIME'] = timeObject
float_features = []
for feature in row[1:]:
float_features.append(float(feature))
feature_row_dict['FEATURES'] = float_features
feature_list_file.append(feature_row_dict)
print "----------------------------"
print "Number of Epochs for Dozee:"
print len(feature_list_file)
print "----------------------------"
lf = open(label_file, 'r')
label_reader = csv.reader(lf)
for label_row in label_reader:
try:
labeltimeObject = dt.datetime.strptime(str(label_row[0]), '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
labeltimeObject = dt.datetime.strptime(str(label_row[0]), '%Y-%m-%d %H:%M:%S')
label_time_list_file.append(labeltimeObject)
label_list_file.append(label_row[1])
file_epoch_counter = 0
diff = -30
for itr in range(0, len(label_time_list_file)):
if (itr != len(label_time_list_file) - 1):
label_time = label_time_list_file[itr]
label_end_time = label_time_list_file[itr + 1]
for dict in feature_list_file:
dict_time = dict['TIME']
epoch_start_time = dict_time - dt.timedelta(seconds=15)
epoch_end_time = dict_time + dt.timedelta(seconds=15)
if (epoch_start_time > label_time and epoch_end_time < label_end_time):
time_val = label_time + dt.timedelta(seconds=diff + 30)
feature_list.append(dict['FEATURES'])
label_list.append(int(label_list_file[itr]))
time_list.append(time_val)
file_epoch_counter = file_epoch_counter + 1
diff = diff + 30
else:
diff = -30
print "----------------------------"
print "Number of Epochs matched:"
print file_epoch_counter
print "----------------------------"
print "----------------------------"
print "Number of Total TEST Epochs:"
print len(feature_list)
print "Number of Total TEST Labels:"
print len(label_list)
print "----------------------------"
# X_train = feature_list
# y_train = label_list
y_test = []
for x in label_list:
y_test.append(int(x))
num_deep_epochs = label_list.count(1)
num_light_epochs = label_list.count(2)
num_rem_epochs = label_list.count(3)
num_wake_epochs = label_list.count(4)
print "----------------------------"
print "Number of Deep Epochs:"
print num_deep_epochs
print "Number of Light Labels:"
print num_light_epochs
print "Number of REM Epochs:"
print num_rem_epochs
print "Number of Wake Labels:"
print num_wake_epochs
print "----------------------------"
X_test = np.asarray(feature_list)
y_test = np.asarray(y_test)
processedDictTest["TIME"] = time_list
processedDictTest["FEATURES"] = X_test
processedDictTest["LABELS"] = y_test
return processedDictTest
def set_classifierValidation(feature_files, label_files):
# read from csv instead
print "Reading Validation data..."
feature_list = []
label_list = []
time_list = []
processedDictValidation = {}
for file_itr in range(0, len(feature_files)):
feature_file = feature_files[file_itr]
label_file = label_files[file_itr]
print "----------------------------"
print "Feature File:"
print feature_file
print "Label File:"
print label_file
print "----------------------------"
feature_list_file = []
label_list_file = []
label_time_list_file = []
ff = open(feature_file, 'r')
reader = csv.reader(ff)
# print reader[-1]
for row in reader:
feature_row_dict = {}
try:
timeObject = dt.datetime.strptime(str(row[0]), '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
timeObject = dt.datetime.strptime(str(row[0]), '%Y-%m-%d %H:%M:%S')
feature_row_dict['TIME'] = timeObject
float_features = []
for feature in row[1:]:
float_features.append(float(feature))
feature_row_dict['FEATURES'] = float_features
feature_list_file.append(feature_row_dict)
print "----------------------------"
print "Number of Epochs for Dozee:"
print len(feature_list_file)
print "----------------------------"
lf = open(label_file, 'r')
label_reader = csv.reader(lf)
for label_row in label_reader:
try:
labeltimeObject = dt.datetime.strptime(str(label_row[0]), '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
labeltimeObject = dt.datetime.strptime(str(label_row[0]), '%Y-%m-%d %H:%M:%S')
label_time_list_file.append(labeltimeObject)
label_list_file.append(label_row[1])
file_epoch_counter = 0
diff = -30
for itr in range(0, len(label_time_list_file)):
if (itr != len(label_time_list_file) - 1):
label_time = label_time_list_file[itr]
label_end_time = label_time_list_file[itr + 1]
for dict in feature_list_file:
dict_time = dict['TIME']
epoch_start_time = dict_time - dt.timedelta(seconds=15)
epoch_end_time = dict_time + dt.timedelta(seconds=15)
if (epoch_start_time > label_time and epoch_end_time < label_end_time):
time_val = label_time + dt.timedelta(seconds=diff + 30)
feature_list.append(dict['FEATURES'])
label_list.append(int(label_list_file[itr]))
time_list.append(time_val)
file_epoch_counter = file_epoch_counter + 1
diff = diff + 30
else:
diff = -30
print "----------------------------"
print "Number of Epochs matched:"
print file_epoch_counter
print "----------------------------"
print "----------------------------"
print "Number of Total TEST Epochs:"
print len(feature_list)
print "Number of Total TEST Labels:"
print len(label_list)
print "----------------------------"
# X_train = feature_list
# y_train = label_list
y_test = []
for x in label_list:
y_test.append(int(x))
num_deep_epochs = label_list.count(1)
num_light_epochs = label_list.count(2)
num_rem_epochs = label_list.count(3)
num_wake_epochs = label_list.count(4)
print "----------------------------"
print "Number of Deep Epochs:"
print num_deep_epochs
print "Number of Light Labels:"
print num_light_epochs
print "Number of REM Epochs:"
print num_rem_epochs
print "Number of Wake Labels:"
print num_wake_epochs
print "----------------------------"
X_test = np.asarray(feature_list)
y_test = np.asarray(y_test)
processedDictValidation["TIME"] = time_list
processedDictValidation["FEATURES"] = X_test
processedDictValidation["LABELS"] = y_test
return processedDictValidation
def set_classifierNew(feature_files):
# read from csv instead
print "Reading test data..."
feature_list = []
label_list = []
time_list = []
processedNew = {}
feature_list_file = []
for file_itr in range(0, len(feature_files)):
feature_file = feature_files[file_itr]
# label_file = label_files[file_itr]
print "----------------------------"
print "Feature File:"
print feature_file
print "----------------------------"
feature_list_file = []
# label_list_file = []
# label_time_list_file = []
ff = open(feature_file, 'r')
reader = csv.reader(ff)
# print reader[-1]
for row in reader:
feature_row_dict = {}
try:
timeObject = dt.datetime.strptime(str(row[0]), '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
timeObject = dt.datetime.strptime(str(row[0]), '%Y-%m-%d %H:%M:%S')
feature_row_dict['TIME'] = timeObject
float_features = []
for feature in row[1:]:
float_features.append(float(feature))
feature_row_dict['FEATURES'] = float_features
feature_list_file.append(feature_row_dict)
for dict in feature_list_file:
dict_time = dict['TIME']
dict_features = dict["FEATURES"]
feature_list.append(dict_features)
time_list.append(dict_time)
print "----------------------------"
print "Number of Epochs for Dozee:"
print len(feature_list)
print "----------------------------"
X_test = np.asarray(feature_list)
processedNew['FEATURES'] = X_test
processedNew['TIME'] = time_list
# pd.DataFrame(processedNew).to_csv("new_file.csv")
return processedNew
def set_classifier_epoch_gen(feature_files, label_files):
# read from csv instead
print "Reading training data..."
feature_list = []
label_list = []
time_list = []
processedDict = {}
for file_itr in range(0, len(feature_files)):
feature_file = feature_files[file_itr]
label_file = label_files[file_itr]
print "----------------------------"
print "Feature File:"
print feature_file
print "Label File:"
print label_file
print "----------------------------"
feature_list_file = []
label_list_file = []
label_time_list_file = []
ff = open(feature_file, 'r')
reader = csv.reader(ff)
for row in reader:
feature_row_dict = {}
try:
timeObject = dt.datetime.strptime(str(row[0]), '%Y-%m-%d %H:%M:%S.%f')
# timeObject = timeObject + dt.timedelta(hours=5, minutes=30)
except ValueError:
timeObject = dt.datetime.strptime(str(row[0]), '%Y-%m-%d %H:%M:%S')
# timeObject = timeObject + dt.timedelta(hours=5, minutes=30)
feature_row_dict['TIME'] = timeObject
float_features = []
for feature in row[1:]:
float_features.append(float(feature))
feature_row_dict['FEATURES'] = float_features
feature_list_file.append(feature_row_dict)
print "----------------------------"
print "Number of Epochs for Dozee:"
print len(feature_list_file)
print "----------------------------"
lf = open(label_file, 'r')
label_reader = csv.reader(lf)
for label_row in label_reader:
try:
labeltimeObject = dt.datetime.strptime(str(label_row[0]), '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
labeltimeObject = dt.datetime.strptime(str(label_row[0]), '%Y-%m-%d %H:%M:%S')
label_time_list_file.append(labeltimeObject)
label_list_file.append(label_row[1])
file_epoch_counter = 0
diff = -30
for itr in range(0, len(label_time_list_file)):
if (itr != len(label_time_list_file) - 1):
label_time = label_time_list_file[itr]
label_end_time = label_time_list_file[itr + 1]
for dict in feature_list_file:
dict_time = dict['TIME']
epoch_start_time = dict_time - dt.timedelta(seconds=15)
epoch_end_time = dict_time + dt.timedelta(seconds=15)
if (epoch_start_time > label_time and epoch_end_time < label_end_time):
# if(int(label_list_file[itr]) != 4):
time_val = label_time + dt.timedelta(seconds=diff + 30)
feature_list.append(dict['FEATURES'])
label_list.append(int(label_list_file[itr]))
time_list.append(time_val)
file_epoch_counter = file_epoch_counter + 1
diff = diff + 30
else:
diff = -30
print "----------------------------"
print "Number of Epochs matched:"
print file_epoch_counter
print "----------------------------"
print "----------------------------"
print "Number of Total Epochs:"
print len(feature_list)
print "Number of Total Labels:"
print len(label_list)
print "----------------------------"
print "Number of Timestamps"
print len(time_list)
print "----------------------------"
processedDict["TIME"] = time_list
processedDict["FEATURES"] = feature_list
processedDict["LABELS"] = label_list
# df = pd.DataFrame(processedDict)
# df.to_csv("processed.csv")
num_deep_epochs = label_list.count(1)
num_light_epochs = label_list.count(2)
num_rem_epochs = label_list.count(3)
num_wake_epochs = label_list.count(4)
print "----------------------------"
print "Number of Deep Epochs:"
print num_deep_epochs
print "Number of Light Labels:"
print num_light_epochs
print "Number of REM Epochs:"
print num_rem_epochs
print "Number of Wake Labels:"
print num_wake_epochs
print "----------------------------"
X_train = np.array(feature_list)
y_train = np.array(label_list)
return processedDict
| 37.680073
| 98
| 0.527485
| 2,290
| 20,611
| 4.48821
| 0.056332
| 0.046702
| 0.044269
| 0.04135
| 0.882759
| 0.868457
| 0.8561
| 0.850068
| 0.846663
| 0.842284
| 0
| 0.009178
| 0.318034
| 20,611
| 546
| 99
| 37.749084
| 0.72204
| 0.060744
| 0
| 0.881007
| 0
| 0
| 0.155691
| 0.068627
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.020595
| null | null | 0.318078
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d9d57dff5e811dfa8e1ffe6772ae4db88911c982
| 58,212
|
py
|
Python
|
PointNetGPD/model/dataset.py
|
MrRen-sdhm/PointNetGPD
|
b846164814e4fa586eefc7e23a562dcda419fe9b
|
[
"MIT"
] | null | null | null |
PointNetGPD/model/dataset.py
|
MrRen-sdhm/PointNetGPD
|
b846164814e4fa586eefc7e23a562dcda419fe9b
|
[
"MIT"
] | null | null | null |
PointNetGPD/model/dataset.py
|
MrRen-sdhm/PointNetGPD
|
b846164814e4fa586eefc7e23a562dcda419fe9b
|
[
"MIT"
] | null | null | null |
import os
import glob
import pickle
import pcl
import torch
import torch.utils.data
import torch.nn as nn
import numpy as np
# global configurations:
from autolab_core import YamlConfig
from dexnet.grasping import GpgGraspSampler
from dexnet.grasping import RobotGripper
home_dir = os.environ['HOME']
yaml_config = YamlConfig(home_dir + "/Projects/PointNetGPD/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, home_dir + "/Projects/PointNetGPD/dex-net/data/grippers")
ags = GpgGraspSampler(gripper, yaml_config)
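# Module-level gripper model and grasp sampler shared by every dataset class
# below; the hard-coded paths assume the PointNetGPD project layout under $HOME.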
class PointGraspDataset(torch.utils.data.Dataset):
def __init__(self, obj_points_num, grasp_points_num, pc_file_used_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.obj_points_num = obj_points_num
self.grasp_points_num = grasp_points_num
self.pc_file_used_num = pc_file_used_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', '*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
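# approach, binormal and minor_normal now form the grasp frame: R1 rotates
# about the binormal (y) by the grasp angle, and R2 carries the local frame
# into world coordinates, so R2.dot(R1)[:, 0] is the approach direction.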
left = center - width*axis/2
right = center + width*axis/2
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
# pc_p2c/left_t/right_t is in local coordinate(with center as origin)
# other(include pc) are in pc coordinate
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
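# Keep only points inside an axis-aligned box around the grasp center:
# half the gripper width along y, a quarter of it along x and z.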
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
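# res is the metric size of one pixel, chosen so that the gripper width spans
# the (margin-reduced) image; dividing coordinates by res gives pixel indices.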
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
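# NaN != NaN, so bad_check flags points whose normal estimation failed;
# such points are deleted before projecting.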
if np.sum(bad_check)!=0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
# try:
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind]
obj_pc = self.transform[obj_grasp][0]
f_grasp = self.d_grasp[obj_grasp]
fl_pc = np.array(self.d_pc[obj_pc])
fl_pc = fl_pc[np.random.choice(len(fl_pc), size=self.pc_file_used_num)]
grasp = np.load(f_grasp)[grasp_ind]
pc = np.vstack([np.load(i) for i in fl_pc])
pc = pc[np.random.choice(len(pc), size=self.obj_points_num)]
t = self.transform[obj_grasp][1]
grasp_pc = self.collect_pc(grasp, pc, t)
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))
score = level_score + refine_score*0.01
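# Binary labeling: score >= thresh_bad gives a negative (0), score <=
# thresh_good gives a positive (1); ambiguous grasps in between are skipped
# by returning None.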
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 1
else:
return None
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
return grasp_pc, label
def __len__(self):
return self.amount
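# A minimal usage sketch (assumed parameter values, not from the original
# file): since __getitem__ can return None, a DataLoader needs a collate_fn
# that drops those samples, e.g.
#
#   def my_collate(batch):
#       batch = [b for b in batch if b is not None]
#       return torch.utils.data.dataloader.default_collate(batch)
#
#   data = PointGraspDataset(obj_points_num=1024, grasp_points_num=750,
#                            pc_file_used_num=20, grasp_amount_per_file=6500,
#                            thresh_good=0.6, thresh_bad=0.6,
#                            path='./data', tag='train')
#   loader = torch.utils.data.DataLoader(data, batch_size=64, shuffle=True,
#                                        collate_fn=my_collate)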
class PointGraspMultiClassDataset(torch.utils.data.Dataset):
def __init__(self, obj_points_num, grasp_points_num, pc_file_used_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.obj_points_num = obj_points_num
self.grasp_points_num = grasp_points_num
self.pc_file_used_num = pc_file_used_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', '*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
# pc_p2c/left_t/right_t is in local coordinate(with center as origin)
# other(include pc) are in pc coordinate
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check)!=0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
# try:
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind]
obj_pc = self.transform[obj_grasp][0]
f_grasp = self.d_grasp[obj_grasp]
fl_pc = np.array(self.d_pc[obj_pc])
fl_pc = fl_pc[np.random.choice(len(fl_pc), size=self.pc_file_used_num)]
grasp = np.load(f_grasp)[grasp_ind]
pc = np.vstack([np.load(i) for i in fl_pc])
pc = pc[np.random.choice(len(pc), size=self.obj_points_num)]
t = self.transform[obj_grasp][1]
grasp_pc = self.collect_pc(grasp, pc, t)
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))
score = level_score + refine_score*0.01
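# Three-way labeling: 0 = bad (score >= thresh_bad), 2 = good
# (score <= thresh_good), 1 = the ambiguous band in between.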
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 2
else:
label = 1
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
return grasp_pc, label
def __len__(self):
return self.amount
class PointGraspOneViewDataset(torch.utils.data.Dataset):
def __init__(self, grasp_points_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.grasp_points_num = grasp_points_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 150 # minimum number of points required
# projection related parameters
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.minimum_point_amount = 150
# transform matrices from the Google scanner frame to the point-cloud frame
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy')) # grasp pose file
# only use the point clouds captured by camera NP3
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', 'pc_NP3_NP5*.npy')) # point cloud file
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc: # build the point-cloud file lists
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for k in self.d_pc.keys():
self.d_pc[k].sort()
for i in fl_grasp: # collect the generated grasp pose files
grasp_fl_name = i.split('/')[-1].split('.')[0] # grasp file name
cnt = grasp_fl_name.split('_')[-1] # grasp file name suffix
head = grasp_fl_name.split('_')[0] # grasp file name prefix
k = grasp_fl_name[len(head)+1:-(len(cnt)+1)] # canonical object name
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys()) # objects to deal with
# print("object1", object1)
object2 = set(self.transform.keys()) # all ycb objects name
# print("object2", object2)
self.object = list(object1)
# self.object = list(object1.intersection(object2)) # take the intersection
print("objects to deal with", self.object)
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
"""
Collect the point cloud inside the gripper closing region.
:param grasp: grasp pose in the scanner mesh frame (grasp_center, grasp_axis, grasp_angle, grasp_width, jaw_width)
:param pc: point cloud
:param transform: transform from the scanner mesh frame to the point-cloud frame
:param vis: visualization option
:return: the point cloud inside the gripper closing region, or its projection
"""
# axis-angle representation
center = grasp[0:3] # grasp center point
axis = grasp[3:6] # binormal
width = grasp[6] # grasp width
angle = grasp[7] # rotation angle
axis = axis/np.linalg.norm(axis) # (3,)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t], [0, 1, 0], [-sin_t, 0, cos_t]] # rotation matrix
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
# unit direction vectors of each axis
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]] # rotation matrix
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach) # gripper approach direction
minor_normal = -np.cross(axis, approach) # minor curvature direction; NOTE: negated to obtain a right-handed frame
# collision checking
# grasp_bottom_center = -ags.gripper.hand_depth * approach + center
# hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
# local_hand_points = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
# if_collide = ags.check_collide(grasp_bottom_center, approach,
# binormal, minor_normal, graspable, local_hand_points)
vis = False
if vis: # NOTE: the grasp pose obtained here may collide with the point cloud (the impact is small)!!! TODO: collision check
mlab.figure(bgcolor=(1, 1, 1), size=(1000, 800))
mlab.pipeline.surface(mlab.pipeline.open("/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/"
"ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply"))
# --- in the scanner frame ---:
# world frame
show_line([0, 0, 0], [0.1, 0, 0], color='r', scale_factor=.0015)
show_line([0, 0, 0], [0, 0.1, 0], color='g', scale_factor=.0015)
show_line([0, 0, 0], [0, 0, 0.1], color='b', scale_factor=.0015)
show_points(pc, color='b', scale_factor=.002) # raw point cloud
show_points(center, color='r', scale_factor=.008)
# show the gripper frame
show_line(center, (center + binormal * 0.05).reshape(3), color='g', scale_factor=.0015)
show_line(center, (center + approach * 0.05).reshape(3), color='r', scale_factor=.0015)
show_line(center, (center + minor_normal * 0.05).reshape(3), color='b', scale_factor=.0015)
grasp_bottom_center = -ags.gripper.hand_depth * approach + center
hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
ags.show_grasp_3d(hand_points, color=(0.4, 0.6, 0.0))
mlab.title("google", size=0.3, color=(0, 0, 0))
mlab.show()
left = center - width*axis/2 # leftmost point of the gripper
right = center + width*axis/2 # rightmost point of the gripper
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
# NOTE: m:mesh c:center p:point cloud
matrix_m2c = np.array([approach, binormal, minor_normal]) # rotation matrix: scanner frame -> grasp-center frame
matrix_p2m = transform[:3, :3] # rotation matrix: point-cloud frame -> scanner frame
trans_p2m = transform[:, 3:][:3].reshape(3,) # translation: point-cloud frame -> scanner frame
trans_p2m = np.array([trans_p2m[0], trans_p2m[1], trans_p2m[2] + 0.02]) # manual fine-tuning
pc_p2m = np.dot(matrix_p2m.T, (pc - trans_p2m).T).T # point cloud registered into the scanner frame
pc_m2c = (np.dot(matrix_m2c, (pc_p2m-center).T)).T # scanner-frame cloud transformed into the grasp-center frame
# pc_c2m = (np.dot(matrix_m2c.T, pc_m2c.T)).T + center # grasp-center-frame cloud back into the scanner frame
left_t = (-width * np.array([0, 1, 0]) / 2).squeeze()
right_t = (width * np.array([0, 1, 0]) / 2).squeeze()
# select the points inside the gripper closing region
x_limit = ags.gripper.hand_depth
z_limit = ags.gripper.hand_height
y_limit = width
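# Unlike the width-derived box in the earlier dataset classes, the one-view
# variant bounds the closing region with the real gripper geometry:
# hand_depth along the approach axis (x), hand_height along z, width along y.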
x1 = pc_m2c[:, 0] > -x_limit
x2 = pc_m2c[:, 0] < 0
y1 = pc_m2c[:, 1] > -y_limit/2
y2 = pc_m2c[:, 1] < y_limit/2
z1 = pc_m2c[:, 2] > -z_limit/2
z2 = pc_m2c[:, 2] < z_limit/2
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0] # indices of the points inside the closing region
if len(self.in_ind) < self.min_point_limit: # too few points inside the closing region
# print("\033[0;32m%s\033[0m" % "[INFO] points num", len(self.in_ind))
return None
vis = False
if vis: # visualize the points inside the closing region
mlab.figure(bgcolor=(1, 1, 1), size=(1000, 800))
mlab.pipeline.surface(mlab.pipeline.open("/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/"
"ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply"))
# world frame
show_line([0, 0, 0], [0.1, 0, 0], color='r', scale_factor=.0015)
show_line([0, 0, 0], [0, 0.1, 0], color='g', scale_factor=.0015)
show_line([0, 0, 0], [0, 0, 0.1], color='b', scale_factor=.0015)
# show_points(pc, color='b', scale_factor=.002) # raw point cloud
show_points(pc_p2m, color='g', scale_factor=.002) # cloud registered in the scanner frame
show_points(pc_m2c, color='b', scale_factor=.002) # cloud in the grasp-center frame
# show_points(pc_c2m, color='r', scale_factor=.002) # cloud mapped from the grasp-center frame back to the scanner frame
# show the gripper in the scanner frame
grasp_bottom_center = -ags.gripper.hand_depth * approach + center
hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
ags.show_grasp_3d(hand_points, color=(0.0, 1.0, 0.0))
# gripper in the grasp-center frame (should sit at the world-frame origin)
hand_points = (np.dot(matrix_m2c, (hand_points - center).T)).T # gripper key points transformed into the grasp-center frame
ags.show_grasp_3d(hand_points, color=(0.5, 0.5, 0.5)) # draw the gripper
# grasp frame in the scanner frame
show_points(center, color='r', scale_factor=.008) # grasp center in the scanner frame
show_line(center, (center + binormal * 0.05).reshape(3), color='g', scale_factor=.0015)
show_line(center, (center + approach * 0.05).reshape(3), color='r', scale_factor=.0015)
show_line(center, (center + minor_normal * 0.05).reshape(3), color='b', scale_factor=.0015)
show_points(pc_m2c, color='c', scale_factor=.002) # cloud in the grasp-center frame
show_points(pc_m2c[self.in_ind], color='b', scale_factor=.002) # closing-region cloud in the grasp-center frame
pc_c2m_region = (np.dot(matrix_m2c.T, pc_m2c[self.in_ind].T)).T + center # closing-region cloud in the scanner frame
show_points(pc_c2m_region, color='r', scale_factor=.002)
# show the gripper closing region
# x = (np.array([[-1, 1, 1, -1, -1], [-1, 1, 1, -1, -1]]) - 1) * x_limit/2
# y = np.array([[-1, -1, -1, -1, -1], [1, 1, 1, 1, 1]]) * y_limit
# z = np.array([[1, 1, -1, -1, 1], [1, 1, -1, -1, 1]]) * z_limit
# mlab.mesh(x, y, z, color=(1, 0, 0), opacity=0.4)
# the eight vertices of a unit cube
x_arr = np.array([-1, 1, 1, -1, -1, 1, 1, -1])/2
y_arr = np.array([-1, -1, 1, 1, -1, -1, 1, 1])/2
z_arr = np.array([-1, -1, -1, -1, 1, 1, 1, 1])/2
x = (x_arr - 0.5) * ags.gripper.hand_depth # shifted by half a unit
y = y_arr * (ags.gripper.hand_outer_diameter-2*ags.gripper.finger_width)
z = z_arr * ags.gripper.hand_height
triangles = [(0, 1, 2), (0, 2, 3), (4, 5, 6), (4, 6, 7), (1, 5, 6), (1, 2, 6),
(0, 4, 7), (0, 3, 7), (2, 3, 6), (3, 6, 7), (0, 1, 5), (0, 4, 5)]
mlab.triangular_mesh(x, y, z, triangles, color=(1, 0, 1), opacity=0.2)
mlab.title("cloud", size=0.3, color=(0, 0, 0))
mlab.show()
if self.projection:
return self.project_pc(pc_m2c, width) # return the projected point cloud
else:
return pc_m2c[self.in_ind] # return the point cloud inside the closing region
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
"""
Compute the projection of the point cloud.
:param point_cloud_voxel:
:param m_width_of_pic:
:param margin:
:param surface_normal:
:param order:
:param gripper_width:
:return:
"""
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
Project the point cloud inside the gripper closing region.
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check) != 0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm, # compute the projection
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm, # compute the projection
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm, # compute the projection
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
# get the object index and the grasp index
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind] # object name, used to look up grasp poses
obj_pc = self.transform[obj_grasp][0] # object name, used to look up point clouds
f_grasp = self.d_grasp[obj_grasp] # grasp pose file name
fl_pc = np.array(self.d_pc[obj_pc]) # point-cloud file names for each view
np.random.shuffle(fl_pc) # shuffle the files
grasp = np.load(f_grasp)[grasp_ind] # load the grasp pose
pc = np.load(fl_pc[-1]) # pick a point cloud at random (via the shuffle)
t = self.transform[obj_grasp][1] # scanner-to-cloud transform; grasp poses were generated on the scanner mesh and must be mapped into the cloud frame
# debug
# level_score_, refine_score_ = grasp[-2:]
# score_ = level_score_ + refine_score_ * 0.01
# if score_ >= self.thresh_bad:
# print("label: 0")
# elif score_ <= self.thresh_good:
# print("label: 1")
grasp_pc = self.collect_pc(grasp, pc, t) # point cloud inside the gripper closing region
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
# sample with replacement when there are too few points, subsample randomly when there are too many
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0)) # reorder the channels
# classify by score
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 1
else:
return None
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
# print("grasp_pc", grasp_pc, grasp_pc.shape, label) # (3, 750)
return grasp_pc, label
def __len__(self):
return self.amount
class PointGraspOneViewMultiClassDataset(torch.utils.data.Dataset):
def __init__(self, grasp_points_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.grasp_points_num = grasp_points_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.minimum_point_amount = 150
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', 'pc_NP3_NP5*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for k in self.d_pc.keys():
self.d_pc[k].sort()
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T  # voxel index of every point
coordinate_buffer = np.unique(voxel_index, axis=0)  # unique voxel coordinates, without duplicates
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i  # map voxel coordinate -> row index
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
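# Editor's sketch (hypothetical helper, not from the original file): the core
# binning pattern of cal_projection condensed, assuming centered points and
# res > 0; np.add.at scatter-adds one count per point into its cell.
def occupancy_image(points_xy, res, size):
    """Count points per 2D cell and normalize to [0, 1]."""
    import numpy as np
    idx = np.floor(points_xy / res + size / 2).astype(int)
    idx = np.clip(idx, 0, size - 1)  # guard against out-of-range bins
    grid = np.zeros((size, size))
    np.add.at(grid, (idx[:, 0], idx[:, 1]), 1)
    return grid / max(grid.max(), 1)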
def project_pc(self, pc, gripper_width):
"""
For the GPD baseline; only supports input_chann in [3, 12].
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)  # NaN != NaN, so this flags invalid normals
if np.sum(bad_check) != 0:
bad_ind = np.where(bad_check)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind]  # grasp pose
obj_pc = self.transform[obj_grasp][0]  # object point cloud
f_grasp = self.d_grasp[obj_grasp]
fl_pc = np.array(self.d_pc[obj_pc])
np.random.shuffle(fl_pc)
grasp = np.load(f_grasp)[grasp_ind]
pc = np.load(fl_pc[-1])
t = self.transform[obj_grasp][1]
grasp_pc = self.collect_pc(grasp, pc, t)
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 2
else:
label = 1
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
return grasp_pc, label
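# Editor's sketch (standalone restatement, not part of the original class): the
# labeling rule used in __getitem__ above, with thresh_bad/thresh_good exactly
# as in that code path.
def score_to_label(level_score, refine_score, thresh_good, thresh_bad):
    """Bucket a combined grasp score into bad (0), good (2), or mid (1)."""
    score = level_score + refine_score * 0.01
    if score >= thresh_bad:
        return 0
    elif score <= thresh_good:
        return 2
    return 1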
def __len__(self):
return self.amount
if __name__ == '__main__':
try:
from mayavi import mlab
except ImportError:
print("Can not import mayavi")
mlab = None
def worker_init_fn(pid): # After creating the workers, each worker has an independent seed
np.random.seed(torch.initial_seed() % (2 ** 31 - 1))
def my_collate(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
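# Editor's sketch (mirrors the commented-out DataLoader further down): because
# __getitem__ may return None, pair the dataset with my_collate so incomplete
# samples are dropped from each batch; torch is assumed imported at the top of
# this file.
def make_loader(dataset, batch_size=64):
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=True, collate_fn=my_collate,
                                       worker_init_fn=worker_init_fn,
                                       drop_last=True)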
def show_points(point, color='lb', scale_factor=.0005):
if color == 'b':
color_f = (0, 0, 1)
elif color == 'r':
color_f = (1, 0, 0)
elif color == 'g':
color_f = (0, 1, 0)
elif color == 'lb': # light blue
color_f = (0.22, 1, 1)
else:
color_f = (1, 1, 1)
if point.size == 3:  # visualize a single point; shape must be (3,), since shape (1, 3) does not work
point = point.reshape(3, )
mlab.points3d(point[0], point[1], point[2], color=color_f, scale_factor=scale_factor)
else: # vis for multiple points
mlab.points3d(point[:, 0], point[:, 1], point[:, 2], color=color_f, scale_factor=scale_factor)
def show_line(un1, un2, color='g', scale_factor=0.0005):
if color == 'b':
color_f = (0, 0, 1)
elif color == 'r':
color_f = (1, 0, 0)
elif color == 'g':
color_f = (0, 1, 0)
else:
color_f = (1, 1, 1)
mlab.plot3d([un1[0], un2[0]], [un1[1], un2[1]], [un1[2], un2[2]], color=color_f, tube_radius=scale_factor)
grasp_points_num = 1000
obj_points_num = 50000
pc_file_used_num = 20
thresh_good = 0.6
thresh_bad = 0.6
input_size = 60
input_chann = 12  # number of projection channels, 3 or 12
# a = PointGraspDataset(
# obj_points_num=obj_points_num,
# grasp_points_num=grasp_points_num,
# pc_file_used_num=pc_file_used_num,
# path="../data",
# tag='train',
# grasp_amount_per_file=2000,
# thresh_good=thresh_good,
# thresh_bad=thresh_bad,
# projection=True,
# project_chann=input_chann,
# project_size=input_size,
# )
# c, d = a.__getitem__(0)
b = PointGraspOneViewDataset(
grasp_points_num=grasp_points_num,
path="../data",
tag='train',
grasp_amount_per_file=2100, # 6500
thresh_good=thresh_good,
thresh_bad=thresh_bad,
)
cnt = 0
for i in range(len(b)):
try:
grasp_pc, label = b[i]
cnt += 1
except (RuntimeError, TypeError, NameError):
print("[INFO] don't have valid points!")
else:
print("[INFO] get points success!")
# print("grasp_pc:", grasp_pc[0], grasp_pc[0].shape, grasp_pc.shape, "\nlable:", label)
# break
# pass
print("[INFO] have {} valid grasp in the dataset.".format(cnt))
# train_loader = torch.utils.data.DataLoader(
# PointGraspOneViewDataset(
# grasp_points_num=grasp_points_num,
# path="../data",
# tag='train',
# grasp_amount_per_file=2100, # 6500
# thresh_good=thresh_good,
# thresh_bad=thresh_bad,
# ),
# batch_size=64,
# num_workers=32,
# pin_memory=True,
# shuffle=True,
# worker_init_fn=worker_init_fn,
# collate_fn=my_collate,
# drop_last=True, # fix bug: ValueError: Expected more than 1 value per channel when training
# )
#
# for batch_idx, (data, target) in enumerate(train_loader):
# # print("data", data, data.shape, "target", target)
# pass
d9e3ad219bf1e4e92d5f3a19c1cec08bb2907d28 | 578 | py | Python | CursoEmVideo/pythonProject/ex109/moeda.py | cassio645/Aprendendo-python | 17a8b5a0e7abc3342d24841ed28093db13d2c130 | ["MIT"]
def metade(valor=0, format=False):
    """Return half of the value; formatted as currency when format is True."""
    resp = valor / 2
    return resp if format is False else money(resp)
def dobro(valor=0, format=False):
    """Return double the value."""
    resp = valor * 2
    return resp if format is False else money(resp)
def dez_porcento(valor=0, format=False):
    """Return the value increased by 10%."""
    resp = valor + (valor * 10) / 100
    return resp if format is False else money(resp)
def quinze_porcento(valor=0, format=False):
    """Return the value decreased by 15%."""
    resp = valor - (valor * 15) / 100
    return resp if format is False else money(resp)
def money(valor=0, moeda='R$'):
    """Format the value as Brazilian currency, e.g. R$10,50."""
    return f'{moeda}{valor:.2f}'.replace('.', ',')
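# Editor's sketch: a hypothetical driver mirroring the CursoEmVideo exercise
# that imports this module (names below are this module's own functions).
if __name__ == '__main__':
    p = 100.0
    print(f'Half of {money(p)} is {metade(p, True)}')
    print(f'Double of {money(p)} is {dobro(p, True)}')
    print(f'10% more: {dez_porcento(p, True)}; 15% less: {quinze_porcento(p, True)}')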
8a2a0966045872701795e9a52e9bbf7e3f271178 | 1,431 | py | Python | jackpot/models.py | clonetech/jackpot | 5033d795cdd40f738330a01de7b197ec1d521e6c | ["BSD-3-Clause"]
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
import datetime
from django.conf import settings
from django.urls import reverse
class Freetips(models.Model):
published_date = models.DateTimeField('Date Published')
country = models.CharField(max_length = 200)
home_team = models.CharField(max_length = 200)
home_score = models.IntegerField(default = 0)
away_score = models.IntegerField(default = 0)
away_team = models.CharField(max_length = 200)
safety = models.CharField(max_length = 200, default="")
prediction = models.CharField(max_length = 100)
status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
def __str__(self):
return self.home_team
class Singlebet(models.Model):
published_date = models.DateTimeField('Date Published')
country = models.CharField(max_length = 200)
home_team = models.CharField(max_length = 200)
home_score = models.IntegerField(default = 0)
away_score = models.IntegerField(default = 0)
away_team = models.CharField(max_length = 200)
safety = models.CharField(max_length = 200, default="")
prediction = models.CharField(max_length = 100)
status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
def __str__(self):
return self.home_team
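# Editor's sketch (hypothetical query helper, not part of the original models
# module): the status/choices fields above support filters such as:
def running_free_tips():
    """Return free tips that are still running, newest first."""
    return Freetips.objects.filter(status='Running').order_by('-published_date')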
8a754853523411880c3e98f4a848bcc44a1a2c2e | 143,226 | py | Python | tests/unit/pypyr/dsl_test.py | Reskov/pypyr | 67bc1795493c19e648e12f776a644f92e3bd2fc8 | ["Apache-2.0"]
"""dsl.py unit tests."""
from copy import deepcopy
from io import StringIO
import logging
import pytest
from unittest.mock import call, patch, MagicMock
from tests.common.utils import DeepCopyMagicMock, patch_logger
import ruamel.yaml as yamler
from ruamel.yaml.comments import CommentedMap, CommentedSeq, TaggedScalar
import pypyr.cache.stepcache as stepcache
from pypyr.context import Context
from pypyr.dsl import (Jsonify,
PyString,
SicString,
SpecialTagDirective,
Step,
RetryDecorator,
WhileDecorator)
from pypyr.errors import (Call,
HandledError,
LoopMaxExhaustedError,
PipelineDefinitionError)
def arb_step_mock(context):
"""No real reason, other than to mock the existence of a run_step."""
return 'from arb step mock'
# region custom yaml tags
# region SpecialTagDirective base
def test_special_tag_directive_base_no_get_value():
"""Base class SpecialTagDirective raises on get_value."""
base = SpecialTagDirective(None)
with pytest.raises(NotImplementedError):
base.get_value()
def test_special_tag_directive_base_eq():
"""Repr equivalence and inverse works."""
assert SpecialTagDirective(None) == SpecialTagDirective(None)
assert SpecialTagDirective('none') != SpecialTagDirective('some')
def test_special_tag_directive_repr_roundtrip():
"""Repr string repr evals back to instance."""
s = SpecialTagDirective('arb')
repr_string = repr(s)
assert repr_string == 'SpecialTagDirective(\'arb\')'
reconstituted = eval(repr_string)
assert isinstance(reconstituted, SpecialTagDirective)
assert str(reconstituted) == 'arb'
def test_special_tag_directive_truthy():
"""Special Tag String work as falsy, else Truthy."""
assert SpecialTagDirective('blah')
assert not SpecialTagDirective(None)
assert not SpecialTagDirective('')
# endregion SpecialTagDirective base
# region jsonify custom tag
def test_jsonify_behaves():
"""Jsonify does what it should."""
assert Jsonify.yaml_tag == '!jsonify'
jsonify = Jsonify({'a': 'string here', 'b': 123, 'c': False})
assert jsonify == Jsonify({'a': 'string here', 'b': 123, 'c': False})
assert jsonify
assert str(jsonify) == "{'a': 'string here', 'b': 123, 'c': False}"
assert repr(jsonify) == (
"Jsonify({'a': 'string here', 'b': 123, 'c': False})")
assert jsonify.get_value(Context({'a': 'BBB'})) == (
'{"a": "string here", "b": 123, "c": false}')
def get_yaml_jsonify_parser():
"""Create ruamel yaml parser with jsonify tag handler."""
yaml_parser = yamler.YAML(typ='rt', pure=True)
yaml_parser.register_class(Jsonify)
return yaml_parser
def get_yaml_with_jsonify(input_string):
"""Get yaml from yaml parser with jsonify tag."""
return get_yaml_jsonify_parser().load(input_string)
def get_string_from_yaml_with_jsonify(yaml):
"""Serialize yaml object to string."""
stream = StringIO()
get_yaml_jsonify_parser().dump(yaml, stream)
output = stream.getvalue()
stream.close()
return output
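# Editor's sketch (not one of the original tests): minimal usage of the two
# helpers above, assuming the serialization behavior the tests below verify.
def example_jsonify_usage():
    yaml = get_yaml_with_jsonify("c: !jsonify\n  k: v\n")
    assert yaml['c'].get_value(Context()) == '{"k": "v"}'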
def test_jsonify_roundtrip_mapping():
"""Jsonify serializes and deserializes from yaml mapping."""
yaml_string = """\
a: 1
b: '1'
c: !jsonify
c1: v1
c2: 22
c3: 123.45
d: False
"""
yaml = get_yaml_with_jsonify(yaml_string)
assert type(yaml['c']) is Jsonify
assert type(yaml['c'].value) is CommentedMap
assert repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})"
assert yaml['c'].value == {'c1': 'v1', 'c2': 22, 'c3': 123.45}
assert yaml['c'].get_value(Context()) == (
'{"c1": "v1", "c2": 22, "c3": 123.45}')
roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
expected = (
"a: 1\n"
"b: '1'\n"
"c: !jsonify\n"
" c1: v1\n"
" c2: 22\n"
" c3: 123.45\n"
"d: false\n")
assert roundtripped_string == expected
def test_jsonify_roundtrip_sequence():
"""Jsonify serializes and de-serializes from yaml sequence."""
yaml_string = """\
a: 1
b: '1'
c: !jsonify
- v1
- 22
- 123.45
- a: a value
b: 123
d: False
"""
yaml = get_yaml_with_jsonify(yaml_string)
assert type(yaml['c']) is Jsonify
assert type(yaml['c'].value) is CommentedSeq
assert repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})"
assert yaml['c'].value == ['v1',
22,
123.45,
{'a': 'a value',
'b': 123}]
assert yaml['c'].get_value(Context()) == (
'["v1", 22, 123.45, {"a": "a value", "b": 123}]')
roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
expected = (
"a: 1\n"
"b: '1'\n"
"c: !jsonify\n"
"- v1\n"
"- 22\n"
"- 123.45\n"
"- a: a value\n"
" b: 123\n"
"d: false\n")
assert roundtripped_string == expected
def test_jsonify_roundtrip_scalar():
"""Jsonify serializes and de-serializes from yaml scalar."""
yaml_string = """\
a: 1
b: '1'
c: !jsonify my scalar
d: !jsonify False
e: !jsonify 123
f: !jsonify '123'
"""
yaml = get_yaml_with_jsonify(yaml_string)
assert type(yaml['c']) is Jsonify
assert yaml['c'].value == 'my scalar'
assert type(yaml['c'].scalar) is TaggedScalar
assert repr(yaml['c']) == f"Jsonify('my scalar', {yaml['c'].scalar!r})"
assert yaml['d'].value is False
assert repr(yaml['d']) == f"Jsonify(False, {yaml['d'].scalar!r})"
assert yaml['e'].value == 123
assert repr(yaml['e']) == f"Jsonify(123, {yaml['e'].scalar!r})"
assert yaml['f'].value == '123'
assert repr(yaml['f']) == f"Jsonify('123', {yaml['f'].scalar!r})"
assert yaml['c'].get_value(Context()) == '"my scalar"'
assert yaml['d'].get_value(Context()) == 'false'
assert yaml['e'].get_value(Context()) == '123'
assert yaml['f'].get_value(Context()) == '"123"'
roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
expected = (
"a: 1\n"
"b: '1'\n"
"c: !jsonify my scalar\n"
"d: !jsonify False\n"
"e: !jsonify 123\n"
"f: !jsonify '123'\n")
assert roundtripped_string == expected
def test_jsonify_roundtrip_mapping_substitutions():
"""Jsonify serializes & deserializes yaml mapping with substitutions."""
yaml_string = """\
a: 1
b: '1'
c: !jsonify
c1: 'v{k3}'
c2: 22
c3: '{k2}'
c4: "{k1} b"
c5: '{k4}'
d: False
"""
yaml = get_yaml_with_jsonify(yaml_string)
context = Context({'k1': 'string {here}',
'k2': 123.45,
'k3': 1,
'k4': '{k2}'})
assert type(yaml['c']) is Jsonify
assert type(yaml['c'].value) is CommentedMap
assert repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})"
assert yaml['c'].value == {'c1': 'v{k3}',
'c2': 22,
'c3': '{k2}',
'c4': '{k1} b',
'c5': '{k4}'}
expected_json = (
'{"c1": "v1", "c2": 22, "c3": 123.45, "c4": "string {here} b", '
'"c5": 123.45}')
assert yaml['c'].get_value(context) == expected_json
roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
expected = (
"a: 1\n"
"b: '1'\n"
"c: !jsonify\n"
" c1: v{k3}\n"
" c2: 22\n"
" c3: '{k2}'\n"
" c4: '{k1} b'\n"
" c5: '{k4}'\n"
"d: false\n")
assert roundtripped_string == expected
def test_jsonify_roundtrip_sequence_substitutions():
"""Jsonify serializes & de-serializes yaml sequence with substitutions."""
yaml_string = """\
a: 1
b: '1'
c: !jsonify
- v{k3}
- 22
- "{k2}"
- a: a value
b: '{k4}'
d: False
"""
yaml = get_yaml_with_jsonify(yaml_string)
context = Context({'k1': 'string {here}',
'k2': 123.45,
'k3': 1,
'k4': '{k2}'})
assert type(yaml['c']) is Jsonify
assert type(yaml['c'].value) is CommentedSeq
assert repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})"
assert yaml['c'].value == ['v{k3}',
22,
'{k2}',
{'a': 'a value',
'b': '{k4}'}]
assert yaml['c'].get_value(context) == (
'["v1", 22, 123.45, {"a": "a value", "b": 123.45}]')
roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
expected = (
"a: 1\n"
"b: '1'\n"
"c: !jsonify\n"
"- v{k3}\n"
"- 22\n"
"- '{k2}'\n"
"- a: a value\n"
" b: '{k4}'\n"
"d: false\n")
assert roundtripped_string == expected
def test_jsonify_roundtrip_scalar_substitutions():
"""Jsonify serializes & de-serializes yaml scalar with substitutions."""
yaml_string = """\
a: 1
b: '1'
c: !jsonify '{k1}'
d: !jsonify '{k2}'
e: !jsonify '{k3}'
f: !jsonify b {k4}
"""
yaml = get_yaml_with_jsonify(yaml_string)
context = Context({'k1': 'my scalar',
'k2': False,
'k3': 123,
'k4': 'a {k1}'})
assert type(yaml['c']) is Jsonify
assert yaml['c'].value == '{k1}'
assert type(yaml['c'].scalar) is TaggedScalar
assert repr(yaml['c']) == f"Jsonify('{{k1}}', {yaml['c'].scalar!r})"
assert yaml['d'].value == '{k2}'
assert yaml['e'].value == '{k3}'
assert yaml['f'].value == 'b {k4}'
assert yaml['c'].get_value(context) == '"my scalar"'
assert yaml['d'].get_value(context) == 'false'
assert yaml['e'].get_value(context) == '123'
assert yaml['f'].get_value(context) == '"b a {k1}"'
roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
expected = (
"a: 1\n"
"b: '1'\n"
"c: !jsonify '{k1}'\n"
"d: !jsonify '{k2}'\n"
"e: !jsonify '{k3}'\n"
"f: !jsonify b {k4}\n")
assert roundtripped_string == expected
# endregion jsonify custom tag
# region py string custom tag
def test_py_string_behaves():
"""Py string does what it should."""
assert PyString.yaml_tag == '!py'
py = PyString('1+1')
assert str(py) == '1+1'
assert repr(py) == "PyString('1+1')"
assert py.get_value(Context()) == 2
def test_py_string_class_methods():
"""Py string yaml class methods serialize and deserialize class."""
mock_node = MagicMock()
mock_node.value = 'False and False'
new_instance = PyString.from_yaml(None, mock_node)
assert isinstance(new_instance, PyString)
assert str(new_instance) == 'False and False'
assert repr(new_instance) == "PyString('False and False')"
assert not new_instance.get_value(Context())
mock_representer = MagicMock()
PyString.to_yaml(mock_representer, mock_node)
mock_representer.represent_scalar.assert_called_once_with('!py',
'False and False'
)
def test_py_string_with_context():
"""Py string works with Context."""
assert PyString('len(a)').get_value(Context({'a': '123'})) == 3
def test_py_string_with_imports():
"""Py string can use imported global namespace."""
context = Context({'a': -3, 'b': 4})
from math import sqrt
context.pystring_globals_update({'squareroot': sqrt})
assert PyString('abs(a) + squareroot(b)').get_value(context) == 5
# imports don't end up in context
assert context == {'a': -3, 'b': 4}
# imports don't contain builtins
assert context._pystring_globals == {'squareroot': sqrt}
def test_py_string_with_closure_scope():
"""Free variables resolve."""
# NameError b is not defined if not a single global scope.
# Just 'a' will work, it's the nested scope that's the prob
source = "[f'{x}{y}' for x in a for y in b]"
context = Context({'a': '12', 'b': 'ab'})
assert PyString(source).get_value(context) == ['1a', '1b', '2a', '2b']
# should contain nothing because nothing added to global as part of eval.
assert context._pystring_globals == {}
# context not polluted.
assert context == {'a': '12', 'b': 'ab'}
def test_py_string_eq_and_neq():
"""Py string equivalence passes on repr."""
assert PyString('arb') == PyString('arb')
assert PyString('blah') != PyString('arb')
def test_py_string_repr_roundtrip():
"""Py string repr evals back to instance."""
s = PyString('len("three")')
repr_string = repr(s)
assert repr_string == 'PyString(\'len("three")\')'
reconstituted = eval(repr_string)
assert isinstance(reconstituted, PyString)
assert reconstituted.get_value(Context()) == 5
def test_py_string_empty():
"""Empty py string raises error."""
with pytest.raises(ValueError) as err:
PyString(None).get_value({})
assert str(err.value) == ('!py string expression is empty. It must be a '
'valid python expression instead.')
with pytest.raises(ValueError) as err:
PyString('').get_value(Context())
def test_py_string_truthy():
"""Empty Py String work as falsy, else Truthy."""
assert PyString('blah')
assert not PyString(None)
assert not PyString('')
# endregion py string custom tag
# region sic string custom tag
def test_sic_string_behaves():
"""Sic string does what it should."""
assert SicString.yaml_tag == '!sic'
sic = SicString('1+1')
assert str(sic) == '1+1'
assert repr(sic) == "SicString('1+1')"
assert sic.get_value({}) == '1+1'
def test_sic_string_class_methods():
"""Sic string yaml class methods serialize and deserialize class."""
mock_node = MagicMock()
mock_node.value = 'False {and} False'
new_instance = SicString.from_yaml(None, mock_node)
assert isinstance(new_instance, SicString)
assert str(new_instance) == 'False {and} False'
assert repr(new_instance) == "SicString('False {and} False')"
assert new_instance.get_value({}) == 'False {and} False'
mock_representer = MagicMock()
SicString.to_yaml(mock_representer, mock_node)
mock_representer.represent_scalar.assert_called_once_with(
'!sic',
'False {and} False'
)
def test_sic_string_with_context():
"""Sic string works with Context."""
assert SicString('len(a)').get_value(Context({'a': '123'})) == 'len(a)'
def test_sic_string_eq_and_neq():
"""Sic string equivalence passes on repr."""
assert SicString('arb') == SicString('arb')
assert SicString('blah') != SicString('arb')
def test_sic_string_repr_roundtrip():
"""Sic string repr evals back to instance."""
s = SicString('arb')
repr_string = repr(s)
assert repr_string == "SicString('arb')"
reconstituted = eval(repr_string)
assert isinstance(reconstituted, SicString)
assert reconstituted.get_value() == 'arb'
def test_sic_string_truthy():
"""Empty Sic String work as falsy, else Truthy."""
assert SicString('blah')
assert not SicString(None)
assert not SicString('')
# endregion sic string custom tag
# endregion custom yaml tags
# region test setup & fixtures
# region test context
def get_test_context():
"""Return a pypyr context for testing."""
return Context({
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77
})
# endregion test context
# region step mocks
def mock_run_step(context):
"""Arbitrary mock function to execute instead of run_step."""
context['test_run_step'] = 'this was set in step'
def mock_run_step_empty_context(context):
"""Clear the context in the step."""
context.clear()
def mock_run_step_none_context(context):
"""None the context in the step."""
# ignore the context is not used flake8 warning
context = None # noqa: F841
# endregion step mocks
# endregion test setup & fixtures
# region Step
# region Step: init
@patch('pypyr.moduleloader.get_module')
def test_simple_step_init_defaults(mocked_moduleloader):
"""Simple step initializes with defaults as expected."""
mocked_moduleloader.return_value.run_step = arb_step_mock
with patch_logger('pypyr.dsl') as mock_logger_debug:
step = Step('blah', 'stepsrunner')
mock_logger_debug.assert_any_call("blah is a simple string.")
assert step.name == 'blah'
assert step.run_step_function('blahblah') == 'from arb step mock'
assert step.foreach_items is None
assert not hasattr(step, 'for_counter')
assert step.in_parameters is None
assert not step.retry_decorator
assert step.run_me
assert not step.skip_me
assert step.steps_runner == 'stepsrunner'
assert not step.swallow_me
assert not step.while_decorator
assert step.line_no is None
assert step.line_col is None
mocked_moduleloader.assert_called_once_with('blah')
@patch('pypyr.moduleloader.get_module')
def test_complex_step_init_defaults(mocked_moduleloader):
"""Complex step initializes with defaults as expected."""
stepcache.step_cache.clear()
mocked_moduleloader.return_value.run_step = arb_step_mock
with patch_logger('pypyr.dsl') as mock_logger_debug:
step = Step({'name': 'blah'}, 'stepsrunner')
assert mock_logger_debug.call_args_list == [
call("starting"),
call("blah is complex."),
call("step name: blah"),
call("done"),
]
assert step.name == 'blah'
assert step.run_step_function('blahblah') == 'from arb step mock'
assert step.foreach_items is None
assert not hasattr(step, 'for_counter')
assert step.in_parameters is None
assert not step.retry_decorator
assert step.run_me
assert not step.skip_me
assert step.steps_runner == 'stepsrunner'
assert not step.swallow_me
assert not step.while_decorator
assert step.line_col is None
assert step.line_no is None
mocked_moduleloader.assert_called_once_with('blah')
def test_complex_step_init_with_missing_name_round_trip():
"""Step can't get step name from the yaml pipeline."""
with pytest.raises(PipelineDefinitionError) as err_info:
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
step_info = CommentedMap({})
step_info._yaml_set_line_col(6, 7)
Step(step_info, None)
assert mock_logger_error.call_count == 1
assert mock_logger_error.mock_calls == [
call('Error at pipeline step yaml line: 7, col: 8'),
]
assert str(err_info.value) == "step must have a name."
@patch('pypyr.moduleloader.get_module', return_value=3)
def test_step_cant_get_run_step_dynamically(mocked_moduleloader):
"""Step can't get run_step method on the dynamically imported module."""
stepcache.step_cache.clear()
with pytest.raises(AttributeError) as err_info:
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with patch_logger('pypyr.cache.stepcache',
logging.ERROR) as mock_cache_logger_error:
Step('mocked.step', None)
mocked_moduleloader.assert_called_once_with('mocked.step')
mock_logger_error.assert_called_once_with(
'Error at pipeline step mocked.step')
mock_cache_logger_error.assert_called_once_with(
"The step mocked.step in module 3 doesn't have a "
"run_step(context) function.")
assert str(err_info.value) == "'int' object has no attribute 'run_step'"
@patch('pypyr.moduleloader.get_module', return_value=3)
def test_step_cant_get_run_step_dynamically_round_trip(mocked_moduleloader):
"""Step can't get run_step method on the dynamically imported module.
With round trip yaml loaded context.
"""
stepcache.step_cache.clear()
with pytest.raises(AttributeError) as err_info:
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with patch_logger('pypyr.cache.stepcache',
logging.ERROR) as mock_cache_logger_error:
commented_context = CommentedMap({'name': 'mocked.step'})
commented_context._yaml_set_line_col(1, 2)
Step(commented_context, None)
mocked_moduleloader.assert_called_once_with('mocked.step')
mock_logger_error.assert_called_once_with(
"Error at pipeline step mocked.step yaml line: 2, col: 3")
mock_cache_logger_error.assert_called_once_with(
"The step mocked.step in module 3 doesn't have a "
"run_step(context) function.")
assert str(err_info.value) == "'int' object has no attribute 'run_step'"
@patch('pypyr.moduleloader.get_module')
def test_complex_step_init_with_decorators(mocked_moduleloader):
"""Complex step initializes with decorators set."""
stepcache.step_cache.clear()
mocked_moduleloader.return_value.run_step = arb_step_mock
step = Step({'name': 'blah',
'in': {'k1': 'v1', 'k2': 'v2'},
'foreach': [0],
'retry': {'max': 5, 'sleep': 7},
'run': False,
'skip': True,
'swallow': True,
'while': {'stop': 'stop condition',
'errorOnMax': True,
'sleep': 3,
'max': 4}
},
'stepsrunner')
assert step.name == 'blah'
assert step.run_step_function('blah') == 'from arb step mock'
assert step.foreach_items == [0]
assert step.in_parameters == {'k1': 'v1', 'k2': 'v2'}
assert step.retry_decorator.max == 5
assert step.retry_decorator.sleep == 7
assert step.retry_decorator.retry_counter is None
assert not step.run_me
assert step.skip_me
assert step.steps_runner == 'stepsrunner'
assert step.swallow_me
assert step.while_decorator.stop == 'stop condition'
assert step.while_decorator.error_on_max
assert step.while_decorator.sleep == 3
assert step.while_decorator.max == 4
assert step.while_decorator.while_counter is None
mocked_moduleloader.assert_called_once_with('blah')
@patch('pypyr.moduleloader.get_module')
def test_complex_step_init_with_decorators_roundtrip(mocked_moduleloader):
"""Complex step initializes with decorators.
Set with round trip yaml loaded context.
"""
stepcache.step_cache.clear()
mocked_moduleloader.return_value.run_step = arb_step_mock
context = CommentedMap({
'name': 'blah',
'in': {'k1': 'v1', 'k2': 'v2'},
'foreach': [0],
'retry': {'max': 5, 'sleep': 7},
'run': False,
'skip': True,
'swallow': True,
'while': {
'stop': 'stop condition',
'errorOnMax': True,
'sleep': 3,
'max': 4
}
}
)
context._yaml_set_line_col(8, 9)
step = Step(context, None)
assert step.name == 'blah'
assert step.run_step_function('blah') == 'from arb step mock'
assert step.foreach_items == [0]
assert step.for_counter is None
assert step.in_parameters == {'k1': 'v1', 'k2': 'v2'}
assert step.retry_decorator.max == 5
assert step.retry_decorator.sleep == 7
assert step.retry_decorator.retry_counter is None
assert not step.run_me
assert step.skip_me
assert step.swallow_me
assert step.while_decorator.stop == 'stop condition'
assert step.while_decorator.error_on_max
assert step.while_decorator.sleep == 3
assert step.while_decorator.max == 4
assert step.while_decorator.while_counter is None
assert step.line_no == 9
assert step.line_col == 10
mocked_moduleloader.assert_called_once_with('blah')
# endregion Step: init
# region Step: description
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_description(mock_invoke_step,
mock_get_module):
"""Complex step with run decorator outputs notify description."""
step = Step({'name': 'step1',
'description': 'test {key1} description'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.NOTIFY) as mock_logger_notify:
step.run_step(context)
mock_logger_notify.assert_called_once_with('test value1 description')
mock_invoke_step.assert_called_once()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_description_not_run(mock_invoke_step,
mock_get_module):
"""Complex step with run decorator set false doesn't run step."""
step = Step({'name': 'step1',
'description': 'test description',
'run': '{key5}'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with patch_logger('pypyr.dsl', logging.NOTIFY) as mock_logger_notify:
step.run_step(context)
mock_logger_notify.assert_called_once_with('(skipping): test description')
mock_logger_info.assert_any_call("step1 not running because run is False.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_description_skip(mock_invoke_step,
mock_get_module):
"""Complex step with run decorator set false doesn't run step."""
step = Step({'name': 'step1',
'description': 'test {key5} description',
'skip': True},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with patch_logger('pypyr.dsl', logging.NOTIFY) as mock_logger_notify:
step.run_step(context)
mock_logger_notify.assert_called_once_with(
'(skipping): test False description')
mock_logger_info.assert_any_call("step1 not running because skip is True.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
# endregion Step: description
# region Step: run_step: foreach
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch.object(Step, 'foreach_loop')
def test_foreach_none(mock_foreach, mock_run, mock_moduleloader):
"""Simple step with None foreach decorator doesn't loop."""
step = Step('step1', None)
context = get_test_context()
original_len = len(context)
step.run_step(context)
mock_foreach.assert_not_called()
mock_run.assert_called_once_with(get_test_context())
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch.object(Step, 'foreach_loop')
def test_foreach_empty(mock_foreach, mock_run, mock_moduleloader):
"""Complex step with empty foreach decorator doesn't loop."""
step = Step({'name': 'step1',
'foreach': []},
None)
context = get_test_context()
original_len = len(context)
step.run_step(context)
mock_foreach.assert_not_called()
mock_run.assert_called_once_with(get_test_context())
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
def test_foreach_once(mock_run, mock_moduleloader):
"""The foreach loops once."""
step = Step({'name': 'step1',
'foreach': ['one']},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('foreach: running step one'),
call('foreach decorator looped 1 times.')]
assert mock_run.call_count == 1
mutated_context = get_test_context()
mutated_context['i'] = 'one'
mock_run.assert_called_once_with(mutated_context)
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
assert context['i'] == 'one'
assert step.for_counter == 'one'
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_foreach_twice(mock_run, mock_moduleloader):
"""The foreach loops twice."""
step = Step({'name': 'step1',
'foreach': ['one', 'two']},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('foreach: running step one'),
call('foreach: running step two'),
call('foreach decorator looped 2 times.')]
assert mock_run.call_count == 2
mutated_context = get_test_context()
mutated_context['i'] = 'one'
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'two'
mock_run.assert_any_call(mutated_context)
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
# after the looping's done, the i value will be the last iterator value
assert context['i'] == 'two'
assert step.for_counter == 'two'
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_foreach_thrice_with_substitutions(mock_run, mock_moduleloader):
"""The foreach loops thrice with substitutions inside a list."""
step = Step({'name': 'step1',
'foreach': ['{key1}', '{key2}', 'key3']},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('foreach: running step value1'),
call('foreach: running step value2'),
call('foreach: running step key3'),
call('foreach decorator looped 3 times.')]
assert mock_run.call_count == 3
mutated_context = get_test_context()
mutated_context['i'] = 'value1'
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'value2'
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'key3'
mock_run.assert_any_call(mutated_context)
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
# after the looping's done, the i value will be the last iterator value
assert context['i'] == 'key3'
assert step.for_counter == 'key3'
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_foreach_with_single_key_substitution(mock_run, mock_moduleloader):
"""The foreach gets list from string format expression."""
step = Step({'name': 'step1',
'foreach': '{list}'},
None)
context = get_test_context()
context['list'] = [99, True, 'string here', 'formatted {key1}']
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('foreach: running step 99'),
call('foreach: running step True'),
call('foreach: running step string here'),
call('foreach: running step formatted value1'),
call('foreach decorator looped 4 times.')]
assert mock_run.call_count == 4
mutated_context = get_test_context()
mutated_context['list'] = [99, True, 'string here', 'formatted {key1}']
mutated_context['i'] = 99
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = True
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'string here'
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'formatted value1'
mock_run.assert_any_call(mutated_context)
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
# after the looping's done, the i value will be the last iterator value
assert context['i'] == 'formatted value1'
assert step.for_counter == 'formatted value1'
def mock_step_mutating_run(context):
"""Mock a step's run_step by setting a context value False."""
context['dynamic_run_expression'] = False
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=mock_step_mutating_run)
def test_foreach_evaluates_run_decorator(mock_invoke, mock_moduleloader):
"""The foreach evaluates run_me expression on each loop iteration."""
step = Step({'name': 'step1',
'run': '{dynamic_run_expression}',
'foreach': ['{key1}', '{key2}', 'key3']},
None)
context = get_test_context()
context['dynamic_run_expression'] = True
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('foreach: running step value1'),
call('foreach: running step value2'),
call('step1 not running because run is False.'),
call('foreach: running step key3'),
call('step1 not running because run is False.'),
call('foreach decorator looped 3 times.')]
assert mock_invoke.call_count == 1
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
# after the looping's done, the i value will be the last iterator value
assert context['i'] == 'key3'
assert step.for_counter == 'key3'
def mock_step_mutating_skip(context):
"""Mock a step's run_step by setting a context value False."""
context['dynamic_skip_expression'] = True
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=mock_step_mutating_skip)
def test_foreach_evaluates_skip_decorator(mock_invoke, mock_moduleloader):
"""The foreach evaluates skip expression on each loop iteration."""
step = Step({'name': 'step1',
'skip': '{dynamic_skip_expression}',
'foreach': ['{key1}', '{key2}', 'key3']},
None)
context = get_test_context()
context['dynamic_skip_expression'] = False
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('foreach: running step value1'),
call('foreach: running step value2'),
call('step1 not running because skip is True.'),
call('foreach: running step key3'),
call('step1 not running because skip is True.'),
call('foreach decorator looped 3 times.')]
assert mock_invoke.call_count == 1
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
# after the looping's done, the i value will be the last iterator value
assert context['i'] == 'key3'
assert step.for_counter == 'key3'
@patch('pypyr.moduleloader.get_module')
def test_foreach_evaluates_swallow_decorator(mock_moduleloader):
"""The foreach evaluates skip expression on each loop iteration."""
step = Step({'name': 'step1',
'swallow': '{dynamic_swallow_expression}',
'foreach': ['{key1}', '{key2}', 'key3']},
None)
context = get_test_context()
context['dynamic_swallow_expression'] = False
original_len = len(context)
arb_error = ValueError('arb error')
def mock_step_deliberate_error(context):
"""Mock step's run_step by setting swallow False and raising err."""
if context['i'] == 'value2':
context['dynamic_swallow_expression'] = True
elif context['i'] == 'key3':
raise arb_error
with patch.object(Step, 'invoke_step',
side_effect=mock_step_deliberate_error) as mock_invoke:
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('foreach: running step value1'),
call('foreach: running step value2'),
call('foreach: running step key3'),
call('foreach decorator looped 3 times.')]
assert mock_invoke.call_count == 3
assert mock_logger_error.call_count == 1
mock_logger_error.assert_called_once_with(
'step1 Ignoring error '
'because swallow is True for this step.\nValueError: arb error')
# validate all the in params ended up in context as intended, plus i,
# plus runErrors
assert len(context) == original_len + 2
# after the looping's done, the i value will be the last iterator value
assert context['i'] == 'key3'
assert step.for_counter == 'key3'
assert context['runErrors'] == [{
'col': None,
'customError': {},
'description': 'arb error',
'exception': arb_error,
'line': None,
'name': 'ValueError',
'step': step.name,
'swallowed': True,
}]
def test_foreach_with_iterator():
"""Loop over iterator in foreach."""
context = Context({'lst': []})
from itertools import product
context.pystring_globals_update({'product': product})
step = Step({'name': 'pypyr.steps.py',
'foreach': PyString('product([1, 2], ["A", "B"])'),
'in': {'py': 'lst.append(i)'}
},
None)
step.run_step(context)
assert context == {'lst': [(1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')],
'i': (2, 'B')}
def test_foreach_with_inline_iterator():
"""Loop over iterator in foreach."""
def myfunc():
yield from ['one', 'two', 'three']
context = Context({'lst': [],
'test_iterator': myfunc()})
step = Step({'name': 'pypyr.steps.py',
'foreach': PyString('test_iterator'),
'in': {'py': 'lst.append(i)'}
},
None)
step.run_step(context)
assert len(context) == 3
assert context['lst'] == ['one', 'two', 'three']
assert context['i'] == 'three'
# endregion Step: run_step: foreach
# region Step: run_step: while
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_while_max(mock_invoke, mock_moduleloader):
"""The while runs to max."""
step = Step({'name': 'step1',
'while': {'max': 3}},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times at 0.0s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3')]
assert mock_invoke.call_count == 3
# validate all the in params ended up in context as intended, plus counter
assert len(context) == original_len + 1
# after the looping's done, the counter value will be the last iterator
assert context['whileCounter'] == 3
assert step.while_decorator.while_counter == 3
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=mock_step_mutating_run)
def test_while_evaluates_run_decorator(mock_invoke, mock_moduleloader):
"""The while evaluates run_me expression on each loop iteration."""
step = Step({'name': 'step1',
'run': '{dynamic_run_expression}',
'while': {'max': '{whileMax}', 'stop': '{key5}'}},
None)
context = get_test_context()
context['dynamic_run_expression'] = True
context['whileMax'] = 3
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times, or until {key5} evaluates to '
'True at 0.0s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('step1 not running because run is False.'),
call('while: running step with counter 3'),
call('step1 not running because run is False.'),
call('while decorator looped 3 times, and {key5} never evaluated to '
'True.')]
assert mock_invoke.call_count == 1
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
# after the looping's done, the i value will be the last iterator value
assert context['whileCounter'] == 3
assert step.while_decorator.while_counter == 3
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step', side_effect=[None, ValueError('whoops')])
def test_while_error_kicks_loop(mock_invoke, mock_moduleloader):
"""Error during while kicks loop."""
step = Step({'name': 'step1',
'while': {'max': 3}},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with pytest.raises(ValueError) as err_info:
step.run_step(context)
assert str(err_info.value) == "whoops"
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times at 0.0s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2')]
assert mock_invoke.call_count == 2
# validate all the in params ended up in context as intended, plus i
# plus runErrors
assert len(context) == original_len + 2
# after the looping's done, the counter will be the last iterator value
assert context['whileCounter'] == 2
assert step.while_decorator.while_counter == 2
assert context['runErrors'] == [{
'col': None,
'customError': {},
'description': 'whoops',
'exception': err_info.value,
'line': None,
'name': 'ValueError',
'step': step.name,
'swallowed': False,
}]
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_while_exhausts(mock_invoke, mock_moduleloader):
"""While exhausts throws error on errorOnMax."""
step = Step({'name': 'step1',
'while': {'max': '{whileMax}',
'stop': '{key5}',
'errorOnMax': '{key6}'}},
None)
context = get_test_context()
context['whileMax'] = 3
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with pytest.raises(LoopMaxExhaustedError) as err_info:
step.run_step(context)
assert str(err_info.value) == ("while loop reached "
"3 and {key5} never evaluated to True.")
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times, or until {key5} evaluates to '
'True at 0.0s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3')]
assert mock_invoke.call_count == 3
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
# after the looping's done, the i value will be the last iterator value
assert context['whileCounter'] == 3
assert step.while_decorator.while_counter == 3
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'invoke_step')
def test_while_exhausts_hard_true(mock_invoke, mock_moduleloader):
"""While evaluates run_me expression on each loop iteration, no format."""
step = Step({'name': 'step1',
'while': {'max': '{whileMax}',
'stop': False,
'errorOnMax': True}},
None)
context = get_test_context()
context['whileMax'] = 3
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with pytest.raises(LoopMaxExhaustedError) as err_info:
step.run_step(context)
assert str(err_info.value) == "while loop reached 3."
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times, or until False evaluates to '
'True at 0.0s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3')]
assert mock_invoke.call_count == 3
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 1
# after the looping's done, the i value will be the last iterator value
assert context['whileCounter'] == 3
assert step.while_decorator.while_counter == 3
@ patch('pypyr.moduleloader.get_module')
@ patch.object(Step, 'run_conditional_decorators')
@ patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_while_nests_foreach_with_substitutions(mock_run, mock_moduleloader):
"""While loops twice, foreach thrice with substitutions inside a list."""
step = Step({'name': 'step1',
'foreach': ['{key1}', '{key2}', 'key3'],
'while': {'max': 2}
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
assert mock_logger_info.mock_calls == [
call('while decorator will loop 2 times at 0.0s intervals.'),
call('while: running step with counter 1'),
call('foreach: running step value1'),
call('foreach: running step value2'),
call('foreach: running step key3'),
call('foreach decorator looped 3 times.'),
call('while: running step with counter 2'),
call('foreach: running step value1'),
call('foreach: running step value2'),
call('foreach: running step key3'),
call('foreach decorator looped 3 times.')]
assert mock_run.call_count == 6
mutated_context = get_test_context()
mutated_context['whileCounter'] = 1
mutated_context['i'] = 'value1'
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'value2'
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'key3'
mock_run.assert_any_call(mutated_context)
mutated_context['whileCounter'] = 2
mutated_context['i'] = 'value1'
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'value2'
mock_run.assert_any_call(mutated_context)
mutated_context['i'] = 'key3'
mock_run.assert_any_call(mutated_context)
# validate all the in params ended up in context as intended, plus i
assert len(context) == original_len + 2
# after the looping's done, the i value will be the last iterator value
assert context['i'] == 'key3'
assert step.for_counter == 'key3'
assert context['whileCounter'] == 2
assert step.while_decorator.while_counter == 2
# endregion Step: run_step: while
# region Step: invoke_step
@ patch('pypyr.moduleloader.get_module')
def test_invoke_step_pass(mocked_moduleloader):
"""run_pipeline_step test pass."""
stepcache.step_cache.clear()
step = Step('mocked.step', None)
step.invoke_step(get_test_context())
mocked_moduleloader.assert_called_once_with('mocked.step')
mocked_moduleloader.return_value.run_step.assert_called_once_with(
{'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4', 'k4lk2': 'value5'},
{'k4lk1': 'value6', 'k4lk2': 'value7'}],
'key5': False,
'key6': True,
'key7': 77})
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_invoke_step_context_abides(mocked_stepcache):
"""Step mutates context & mutation abides after run_pipeline_step."""
mocked_stepcache.return_value = mock_run_step
context = get_test_context()
step = Step('mocked.step', None)
step.invoke_step(context)
mocked_stepcache.assert_called_once_with('mocked.step')
assert context['test_run_step'] == 'this was set in step'
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_invoke_step_empty_context(mocked_stepcache):
"""Empty context in step (i.e count == 0, but not is None)."""
mocked_stepcache.return_value = mock_run_step_empty_context
context = get_test_context()
step = Step('mocked.step', None)
step.invoke_step(context)
mocked_stepcache.assert_called_once_with('mocked.step')
assert len(context) == 0
assert context is not None
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_invoke_step_none_context(mocked_stepcache):
"""Step rebinding context to None doesn't affect the caller Context."""
mocked_stepcache.return_value = mock_run_step_none_context
context = get_test_context()
step = Step('mocked.step', None)
step.invoke_step(False)
mocked_stepcache.assert_called_once_with('mocked.step')
assert context == {'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4', 'k4lk2': 'value5'},
{'k4lk1': 'value6', 'k4lk2': 'value7'}],
'key5': False,
'key6': True,
'key7': 77}
# endregion Step: invoke_step
# region Step: reset_context_counters
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters(mock_step_cache):
"""Reset all counters in context."""
context = {'a': 'b',
'c': 'd',
'whileCounter': 99,
'retryCounter': 999,
'i': '9999'}
call = Call(['one', 'two'], 'sg', 'fg', ('a', 'changed'))
step_config = {'name': 'blah',
'while': {
'max': 4
},
'foreach': ['one', 'two'],
'retry': {
'max': 5
}
}
step = Step(step_config, None)
step.while_decorator.while_counter = 6
step.for_counter = 'seven'
step.retry_decorator.retry_counter = 8
step.reset_context_counters(context, call)
assert context == {'a': 'changed',
'c': 'd',
'whileCounter': 6,
'i': 'seven',
'retryCounter': 8}
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_dont_need_updating(mock_step_cache):
"""Reset all counters in context when they don't need to update."""
context = {'a': 'b',
'c': 'd',
'whileCounter': 99,
'retryCounter': 999,
'i': '9999'}
call = Call(['one', 'two'], 'sg', 'fg', ('a', 'b'))
step_config = {'name': 'blah',
'while': {
'max': 4
},
'foreach': ['one', 'two'],
'retry': {
'max': 5
}
}
step = Step(step_config, None)
step.while_decorator.while_counter = 99
step.for_counter = '9999'
step.retry_decorator.retry_counter = 999
step.reset_context_counters(context, call)
assert context == {'a': 'b',
'c': 'd',
'whileCounter': 99,
'i': '9999',
'retryCounter': 999}
@ patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_none(mock_step_cache):
"""Reset but no counters available & key not found in context."""
context = {'a': 'b',
'c': 'd'}
call = Call(['one', 'two'], 'sg', 'fg', ('x', 'z'))
step_config = {'name': 'blah'}
step = Step(step_config, None)
step.reset_context_counters(context, call)
# reset added the key that didn't exist to context
assert context == {'a': 'b',
'c': 'd',
'x': 'z'}
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_none_none(mock_step_cache):
"""Reset key to none should not be possible."""
context = {'a': 'b',
'c': 'd'}
call = Call(['one', 'two'], 'sg', 'fg', ('x', None))
step_config = {'name': 'blah'}
step = Step(step_config, None)
with pytest.raises(AssertionError):
step.reset_context_counters(context, call)
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_mutable(mock_step_cache):
"""Reset to a mutable object."""
arb_mutable = ['b']
context = {'a': arb_mutable,
'c': 'd'}
call = Call(['one', 'two'], 'sg', 'fg', ('a', arb_mutable))
step_config = {'name': 'blah'}
step = Step(step_config, None)
step.reset_context_counters(context, call)
assert context == {'a': ['b'],
'c': 'd'}
@patch('pypyr.cache.stepcache.step_cache.get_step')
def test_reset_context_counters_mutate(mock_step_cache):
"""Reset to a mutating mutable."""
arb_mutable = ['b']
context = {'a': arb_mutable,
'c': 'd'}
call = Call(['one', 'two'], 'sg', 'fg', ('a', arb_mutable))
step_config = {'name': 'blah'}
step = Step(step_config, None)
arb_mutable[0] = 'changed'
step.reset_context_counters(context, call)
assert context == {'a': ['changed'],
'c': 'd'}
# endregion Step: reset_context_counters
# region Step: run_step: run
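# Every run test below exercises the same truthiness cast. A minimal
# illustrative helper inferred from the assertions (an approximation, not
# pypyr's actual casting code):
def _example_run_truthiness(value):
    """Approximate the bool cast the run/skip tests below assert.

    Strings are truthy only when they spell 'true' in any case; bools and
    ints use ordinary Python truthiness (0 -> False, -1/1/99 -> True).
    """
    if isinstance(value, str):
        return value.lower() == 'true'
    return bool(value)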
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_true(mock_invoke_step,
mock_get_module):
"""Complex step with run decorator set true will run step."""
step = Step({'name': 'step1',
'run': True},
None)
context = get_test_context()
original_len = len(context)
step.run_step(context)
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_false(mock_invoke_step,
mock_get_module):
"""Complex step with run decorator set false doesn't run step."""
step = Step({'name': 'step1',
'run': False},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call("step1 not running because run is False.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_str_formatting_false(
mock_invoke_step,
mock_get_module):
"""Complex step with run formatting expression false doesn't run step."""
step = Step({
'name': 'step1',
        # run will evaluate False because '{key1}' formats to a string
        # that's not 'True'.
'run': '{key1}'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call("step1 not running because run is False.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_str_false(mock_invoke_step,
mock_get_module):
"""Complex step with run set to string False doesn't run step."""
step = Step({
'name': 'step1',
        # run will evaluate False because it's a string and it's not 'True'.
'run': 'False'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because run is False.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_str_lower_false(mock_invoke_step,
mock_get_module):
"""Complex step with run set to string false doesn't run step."""
step = Step({
'name': 'step1',
        # run will evaluate False because it's a string and it's not 'True'.
'run': 'false'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because run is False.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_bool_formatting_false(
mock_invoke_step,
mock_get_module):
"""Complex step with run formatting expression false doesn't run step."""
step = Step({
'name': 'step1',
# key5 will evaluate False because it's a bool and it's False
'run': '{key5}'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because run is False.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_bool_formatting_true(
mock_invoke_step,
mock_get_module):
"""Complex step with run formatting expression true runs step."""
step = Step({
'name': 'step1',
# key6 will evaluate True because it's a bool and it's True
'run': '{key6}'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_string_true(mock_invoke_step,
mock_get_module):
"""Complex step with run formatting expression True runs step."""
step = Step({
'name': 'step1',
# 'True' will evaluate bool True
'run': 'True'},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_1_true(mock_invoke_step,
mock_get_module):
"""Complex step with run 1 runs step."""
step = Step({
'name': 'step1',
# 1 will evaluate True because it's an int and 1
'run': 1},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_99_true(mock_invoke_step,
mock_get_module):
"""Complex step with run 99 runs step."""
step = Step({
'name': 'step1',
# 99 will evaluate True because it's an int and > 0
'run': 99
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_neg1_true(mock_invoke_step,
mock_get_module):
"""Complex step with run -1 runs step."""
step = Step({
'name': 'step1',
# -1 will evaluate True because it's an int and != 0
'run': -1
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_with_single_retry(mock_invoke_step,
mock_get_module):
"""Complex step with retry runs step."""
step = Step({
'name': 'step1',
'retry': {'max': 10}
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
{'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77,
'retryCounter': 1})
# validate all the in params ended up in context as intended
assert len(context) == original_len + 1
assert context['retryCounter'] == 1
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_with_retries(mock_invoke_step,
mock_get_module):
"""Complex step with retry runs step."""
step = Step({
'name': 'step1',
'retry': {'max': 0}
},
None)
context = get_test_context()
original_len = len(context)
mock_invoke_step.side_effect = [ValueError('arb'), None]
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
assert mock_invoke_step.call_count == 2
mock_invoke_step.assert_called_with(
{'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77,
'retryCounter': 2})
# validate all the in params ended up in context as intended
assert len(context) == original_len + 1
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_on_error(mock_invoke_step,
mock_get_module):
"""Complex step with swallow false raises error."""
complex_step_info = CommentedMap({
'name': 'step1',
'swallow': 0,
'onError': {'arb': 'value'}
})
complex_step_info._yaml_set_line_col(5, 6)
step = Step(complex_step_info, None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(ValueError) as err_info:
step.run_step(context)
assert str(err_info.value) == "arb error here"
mock_logger_error.assert_called_once_with(
"Error while running step step1 at pipeline yaml line: 6, col: 7")
# validate all the in params ended up in context as intended,
# plus runErrors
assert len(context) == original_len + 1
assert context['runErrors'] == [{
'col': 7,
'customError': {'arb': 'value'},
'description': 'arb error here',
'exception': err_info.value,
'line': 6,
'name': 'ValueError',
'step': step.name,
'swallowed': False,
}]
# endregion Step: run_step: run
# region Step: run_step: skip
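# skip mirrors run with inverted polarity: a truthy skip prevents the step
# from running, and when both decorators are present run is evaluated first
# (see the run_and_skip test below). Illustrative pipeline yaml shape,
# assumed here for orientation only:
#
#     steps:
#       - name: step1
#         run: '{key5}'   # evaluated first
#         skip: True      # only consulted if run evaluated truthy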
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_false(mock_invoke_step,
mock_get_module):
"""Complex step with skip decorator set false will run step."""
step = Step({
'name': 'step1',
'skip': False
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_true(mock_invoke_step,
mock_get_module):
"""Complex step with skip decorator set true runa step."""
step = Step({
'name': 'step1',
'skip': True
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because skip is True.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_formatting_false(
mock_invoke_step,
mock_get_module):
"""Complex step with skip formatting expression false doesn't run step."""
step = Step({
'name': 'step1',
        # key6 will evaluate True because it's a bool and it's True
'skip': '{key6}'
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because skip is True.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_true(mock_invoke_step,
mock_get_module):
"""Complex step with skip set to string False doesn't run step."""
step = Step({
'name': 'step1',
# skip evaluates True because it's a string and TRUE parses to True.
'skip': 'TRUE'
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because skip is True.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_lower_true(mock_invoke_step,
mock_get_module):
"""Complex step with run set to string true doesn't run step."""
step = Step({
'name': 'step1',
# skip will evaluate true because it's a string and true is True.
'skip': 'true'
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because skip is True.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_and_skip_bool_formatting_false(
mock_invoke_step,
mock_get_module):
"""Complex step with run doesn't run step, evals before skip."""
step = Step({
'name': 'step1',
# key5 will evaluate False because it's a bool and it's False
'run': '{key5}',
'skip': True
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because run is False.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_bool_formatting_false(
mock_invoke_step,
mock_get_module):
"""Complex step with skip formatting expression true runs step."""
step = Step({
'name': 'step1',
# key5 will evaluate False because it's a bool and it's False
'skip': '{key5}'
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_string_false(
mock_invoke_step,
mock_get_module):
"""Complex step with skip formatting expression False runs step."""
step = Step({
'name': 'step1',
# 'False' will evaluate bool False
'skip': 'False'
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_0_true(
mock_invoke_step,
mock_get_module):
"""Complex step with run 1 runs step."""
step = Step({
'name': 'step1',
# 0 will evaluate False because it's an int and 0
'skip': 0
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_99_true(
mock_invoke_step,
mock_get_module):
"""Complex step with skip 99 doesn't run step."""
step = Step({
'name': 'step1',
# 99 will evaluate True because it's an int and > 0
'skip': 99
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call(
"step1 not running because skip is True.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_neg1_true(mock_invoke_step,
mock_get_module):
"""Complex step with run -1 runs step."""
step = Step({
'name': 'step1',
# -1 will evaluate True because it's an int and != 0
'skip': -1
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
step.run_step(context)
mock_logger_info.assert_any_call("step1 not running because skip is True.")
mock_invoke_step.assert_not_called()
# validate all the in params ended up in context as intended
assert len(context) == original_len
# endregion Step: run_step: skip
# region Step: run_step: swallow
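# With swallow truthy the step's error is logged and suppressed; either way
# an entry is appended to context['runErrors']. The entry shape these tests
# assert:
#
#     {'name': <exception class name>, 'description': <str(exception)>,
#      'customError': <formatted onError value or {}>, 'line': ...,
#      'col': ..., 'step': <step name>, 'exception': <exception instance>,
#      'swallowed': <bool>}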
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_swallow_true(mock_invoke_step,
mock_get_module):
"""Complex step with swallow true runs normally even without error."""
step = Step({
'name': 'step1',
'swallow': True
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_complex_swallow_false(mock_invoke_step,
mock_get_module):
"""Complex step with swallow false runs normally even without error."""
step = Step({
'name': 'step1',
'swallow': False
},
None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended
assert len(context) == original_len
@patch('pypyr.moduleloader.get_module')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_run_pipeline_steps_complex_swallow_true_error(mock_get_module):
"""Complex step with swallow true swallows error."""
step = Step({
'name': 'step1',
'swallow': 1
},
None)
context = get_test_context()
original_len = len(context)
arb_error = ValueError('arb error here')
with patch.object(
Step, 'invoke_step', side_effect=arb_error) as mock_invoke_step:
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
step.run_step(context)
mock_logger_debug.assert_any_call("done")
mock_logger_error.assert_called_once_with(
"step1 Ignoring error because swallow is True "
"for this step.\n"
"ValueError: arb error here")
mock_invoke_step.assert_called_once_with(
context={'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77})
# validate all the in params ended up in context as intended,
# plus runErrors
assert len(context) == original_len + 1
assert context['runErrors'] == [{
'col': None,
'customError': {},
'description': 'arb error here',
'exception': arb_error,
'line': None,
'name': 'ValueError',
'step': step.name,
'swallowed': True,
}]
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_pipeline_steps_complex_swallow_false_error(mock_invoke_step,
mock_get_module):
"""Complex step with swallow false raises error."""
step = Step({
'name': 'step1',
'swallow': 0
},
None)
context = get_test_context()
original_len = len(context)
with pytest.raises(ValueError) as err_info:
step.run_step(context)
assert str(err_info.value) == "arb error here"
# validate all the in params ended up in context as intended,
# plus runErrors
assert len(context) == original_len + 1
assert context['runErrors'] == [{
'col': None,
'customError': {},
'description': 'arb error here',
'exception': err_info.value,
'line': None,
'name': 'ValueError',
'step': step.name,
'swallowed': False,
}]
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_pipeline_steps_complex_round_trip(mock_invoke_step,
mock_get_module):
"""Complex step with swallow false raises error."""
complex_step_info = CommentedMap({
'name': 'step1',
'swallow': 0
})
complex_step_info._yaml_set_line_col(5, 6)
step = Step(complex_step_info, None)
context = get_test_context()
original_len = len(context)
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(ValueError) as err_info:
step.run_step(context)
assert str(err_info.value) == "arb error here"
mock_logger_error.assert_called_once_with(
"Error while running step step1 at pipeline yaml line: 6, col: 7")
# validate all the in params ended up in context as intended,
# plus runErrors
assert len(context) == original_len + 1
assert context['runErrors'] == [{
'col': 7,
'customError': {},
'description': 'arb error here',
'exception': err_info.value,
'line': 6,
'name': 'ValueError',
'step': step.name,
'swallowed': False,
}]
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
def test_run_pipeline_steps_complex_swallow_defaults_false_error(
mock_invoke_step,
mock_get_module):
"""Complex step with swallow not specified still raises error."""
step = Step({
'name': 'step1'
},
None)
context = get_test_context()
original_len = len(context)
with pytest.raises(ValueError) as err_info:
step.run_step(context)
assert str(err_info.value) == "arb error here"
# validate all the in params ended up in context as intended,
# plus runErrors
assert len(context) == original_len + 1
assert context['runErrors'] == [{
'col': None,
'customError': {},
'description': 'arb error here',
'exception': err_info.value,
'line': None,
'name': 'ValueError',
'step': step.name,
'swallowed': False,
}]
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step', side_effect=ValueError('arb error here'))
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_run_pipeline_steps_simple_with_error(mock_invoke_step,
mock_get_module):
"""Simple step run with error should not swallow."""
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
step = Step('step1', None)
with pytest.raises(ValueError) as err_info:
step.run_step(Context({'k1': 'v1'}))
assert str(err_info.value) == "arb error here"
mock_logger_debug.assert_any_call('step1 is a simple string.')
mock_invoke_step.assert_called_once_with(
context={'k1': 'v1'})
# endregion Step: run_step: swallow
# region Step: run_step: input context
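# This region checks the full 'in' lifecycle inside run_step: the 'in'
# arguments are merged into context before invoke_step and removed again
# afterwards, so they're visible to the step itself but not to later steps.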
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_run_step_in_with_clean(mock_invoke_step, mock_get_module):
"""Step sets 'in' arguments in context, unset from context when done."""
step = Step({
'name': 'step1',
'in': {
'key1': 'updated1',
'key2': 'updated2',
'keyadded': 'added3'
}
},
None)
context = get_test_context()
step.run_step(context)
# step called with context updated with 'in' arguments
assert mock_invoke_step.call_count == 1
assert mock_invoke_step.call_args_list[0] == call(context={
'key1': 'updated1',
'key2': 'updated2',
'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77,
'keyadded': 'added3'})
# context when done has 'in' args removed.
assert context == {'key3': 'value3',
'key4': [
{'k4lk1': 'value4',
'k4lk2': 'value5'},
{'k4lk1': 'value6',
'k4lk2': 'value7'}
],
'key5': False,
'key6': True,
'key7': 77}
# endregion Step: run_step: input context
# region Step: set_step_input_context
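# set_step_input_context merges the step's 'in' mapping over the existing
# context: new keys are added, existing keys overwritten. A rough sketch of
# the behavior asserted below (an assumption, not pypyr's actual code):
#
#     if in_parameters:
#         context.update(in_parameters)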
@patch('pypyr.moduleloader.get_module')
def test_set_step_input_context_no_in_simple(mocked_moduleloader):
"""Set step context does nothing if no in key found in simple step."""
step = Step('blah', None)
context = get_test_context()
step.set_step_input_context(context)
assert context == get_test_context()
@patch('pypyr.moduleloader.get_module')
def test_set_step_input_context_no_in_complex(mocked_moduleloader):
"""Set step context does nothing if no in key found in complex step."""
step = Step({'name': 'blah'}, None)
context = get_test_context()
step.set_step_input_context(context)
assert context == get_test_context()
@patch('pypyr.moduleloader.get_module')
def test_set_step_input_context_in_empty(mocked_moduleloader):
"""Set step context does nothing if in key found but it's empty."""
step = Step({'name': 'blah', 'in': {}}, None)
context = get_test_context()
step.set_step_input_context(context)
assert context == get_test_context()
@patch('pypyr.moduleloader.get_module')
def test_set_step_input_context_with_in(mocked_moduleloader):
"""Set step context adds in to context."""
context = get_test_context()
original_len = len(context)
in_args = {'newkey1': 'v1',
'newkey2': 'v2',
'key3': 'updated in',
'key4': [0, 1, 2, 3],
'key5': True,
'key6': False,
'key7': 88}
step = Step({'name': 'blah', 'in': in_args}, None)
step.set_step_input_context(context)
assert len(context) - 2 == original_len
assert context['newkey1'] == 'v1'
assert context['newkey2'] == 'v2'
assert context['key1'] == 'value1'
assert context['key2'] == 'value2'
assert context['key3'] == 'updated in'
assert context['key4'] == [0, 1, 2, 3]
assert context['key5']
assert not context['key6']
assert context['key7'] == 88
# endregion Step: set_step_input_context
# region Step: unset_step_input_context
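# unset_step_input_context removes every key named in the step's 'in'
# mapping from context, whether or not the key is still present. Sketch of
# the asserted behavior (an assumption, not pypyr's actual code):
#
#     for key in (in_parameters or {}):
#         context.pop(key, None)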
def test_unset_step_input_context_in_none():
"""Unset works when in parameters None."""
context = get_test_context()
step = Step({'name': 'blah', 'in': None}, None)
step.unset_step_input_context(context)
# Nothing removed because 'in' was None
assert context == get_test_context()
def test_unset_step_input_context_in_empty():
"""Unset works when in parameters exists but is empty."""
context = get_test_context()
step = Step({'name': 'blah', 'in': {}}, None)
step.unset_step_input_context(context)
    # Nothing removed because 'in' was an empty dict
assert context == get_test_context()
def test_unset_step_input_context():
"""Unset works when in parameters specified."""
context = get_test_context()
in_args = {'newkey1': 'v1',
'newkey2': 'v2',
'key3': 'updated in',
'key4': [0, 1, 2, 3],
'key5': True,
'key6': False,
'key7': 88}
step = Step({'name': 'blah', 'in': in_args}, None)
step.unset_step_input_context(context)
# Removed existing keys & non-existing keys specified in 'in' from context
assert context == {'key1': 'value1',
'key2': 'value2'}
# endregion Step: unset_step_input_context
# region Step: save_error
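# save_error builds one runErrors entry per call and appends it, so repeated
# failures accumulate (see test_save_error_multiple_call). line/col come
# from ruamel's CommentedMap round-trip info when available (note the +1
# offsets in the round-trip test), and onError is formatted against context
# before being stored as customError.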
@patch('pypyr.moduleloader.get_module')
def test_save_error_with_no_previous_errors_in_context(mocked_moduleloader):
"""Save error."""
step = Step({'name': 'blah'}, None)
context = get_test_context()
original_len = len(context)
arb_error = ValueError("arb error")
step.save_error(context, exception=arb_error, swallowed=False)
assert len(context) == original_len + 1
# validate all except runErrors
assert get_test_context().items() <= context.items()
assert context['runErrors'] == [{
'col': None,
'customError': {},
'description': 'arb error',
'exception': arb_error,
'line': None,
'name': 'ValueError',
'step': step.name,
'swallowed': False,
}]
@patch('pypyr.moduleloader.get_module')
def test_save_error_round_trip(mocked_moduleloader):
"""Save error with CommentedMap."""
context = get_test_context()
step_info = CommentedMap({'name': 'arb step'})
step_info._yaml_set_line_col(6, 7)
step = Step(step_info, None)
original_len = len(context)
arb_error = ValueError("arb error")
step.save_error(context, exception=arb_error, swallowed=True)
assert len(context) == original_len + 1
assert get_test_context().items() <= context.items()
assert context['runErrors'] == [{
'col': 8,
'customError': {},
'description': 'arb error',
'exception': arb_error,
'line': 7,
'name': 'ValueError',
'step': step.name,
'swallowed': True,
}]
@patch('pypyr.moduleloader.get_module')
def test_save_error_formatted(mocked_moduleloader):
"""Save error with formatting expression."""
step = Step({'name': 'blah', 'onError': {'key': '{key1}'}}, None)
context = get_test_context()
original_len = len(context)
arb_error = ValueError("arb error")
step.save_error(context, exception=arb_error, swallowed=False)
assert len(context) == original_len + 1
assert get_test_context().items() <= context.items()
assert context['runErrors'] == [{
'col': None,
'customError': {'key': 'value1'},
'description': 'arb error',
'exception': arb_error,
'line': None,
'name': 'ValueError',
'step': step.name,
'swallowed': False,
}]
@patch('pypyr.moduleloader.get_module')
def test_save_error_multiple_call(mocked_moduleloader):
"""Save multiple errors."""
step = Step({'name': 'blah'}, None)
context = get_test_context()
original_len = len(context)
first_arb_error = ValueError("arb error first")
step.save_error(context, exception=first_arb_error, swallowed=True)
second_arb_error = RuntimeError("arb error second")
step.save_error(context, exception=second_arb_error, swallowed=False)
assert len(context) == original_len + 1
assert get_test_context().items() <= context.items()
assert len(context['runErrors']) == 2
assert context['runErrors'][0] == {
'col': None,
'customError': {},
'description': 'arb error first',
'exception': first_arb_error,
'line': None,
'name': 'ValueError',
'step': step.name,
'swallowed': True,
}
assert context['runErrors'][1] == {
'col': None,
'customError': {},
'description': 'arb error second',
'exception': second_arb_error,
'line': None,
'name': 'RuntimeError',
'step': step.name,
'swallowed': False,
}
# endregion Step: save_error
# endregion Step
# region RetryDecorator
# region RetryDecorator: init
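# RetryDecorator config keys exercised in this region, with the instance
# attribute each maps to (per test_retry_init_all_attributes):
#
#     retry:
#       max: 3              -> rd.max
#       sleep: 4.4          -> rd.sleep
#       retryOn: [...]      -> rd.retry_on
#       stopOn: [...]       -> rd.stop_on
#       backoff: 'arb'      -> rd.backoff
#       backoffArgs: {...}  -> rd.backoff_args
#       sleepMax: 5.5       -> rd.sleep_max
#       jrc: 6.6            -> rd.jrc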
def test_retry_init_defaults_all():
"""The RetryDecorator ctor sets defaults with nothing set."""
rd = RetryDecorator({})
assert rd.backoff is None
assert rd.backoff_args is None
assert rd.jrc == 0
assert rd.max is None
assert rd.sleep_max is None
assert rd.sleep == 0
assert rd.stop_on is None
assert rd.retry_on is None
assert rd.retry_counter is None
def test_retry_init_defaults_max():
"""The RetryDecorator ctor sets defaults with only max set."""
rd = RetryDecorator({'max': 3})
assert rd.backoff is None
assert rd.backoff_args is None
assert rd.jrc == 0
assert rd.max == 3
assert rd.sleep_max is None
assert rd.sleep == 0
assert rd.stop_on is None
assert rd.retry_on is None
assert rd.retry_counter is None
def test_retry_init_all_attributes():
"""The RetryDecorator ctor with all props set."""
rd = RetryDecorator({'max': 3,
'sleep': 4.4,
'retryOn': [1, 2, 3],
'stopOn': [4, 5, 6],
'backoff': 'arb',
'sleepMax': 5.5,
'jrc': 6.6,
'backoffArgs': {'a': 'b'}}
)
assert rd.backoff == 'arb'
assert rd.backoff_args == {'a': 'b'}
assert rd.jrc == 6.6
assert rd.max == 3
assert rd.sleep_max == 5.5
assert rd.sleep == 4.4
assert rd.stop_on == [4, 5, 6]
assert rd.retry_on == [1, 2, 3]
assert rd.retry_counter is None
def test_retry_init_not_a_dict():
"""The RetryDecorator raises PipelineDefinitionError on bad ctor input."""
with pytest.raises(PipelineDefinitionError) as err_info:
RetryDecorator('arb')
assert str(err_info.value) == (
"retry decorator must be a dict (i.e a map) type.")
# endregion RetryDecorator: init
# region RetryDecorator: exec_iteration
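# The retryOn/stopOn tests below pin down a precedence order. A minimal
# illustrative decision function inferred from those assertions (not the
# actual pypyr implementation; max handling and HandledError unwrapping
# omitted):
def _example_retry_decision(error_name, retry_on, stop_on):
    """Return 'raise' or 'retry' for an error name, per the tests below."""
    # stopOn is evaluated first and supersedes retryOn
    if stop_on is not None and error_name in stop_on:
        return 'raise'
    # with retryOn set, only the listed error names are retried
    if retry_on is not None and error_name not in retry_on:
        return 'raise'
    return 'retry'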
def test_retry_exec_iteration_returns_true_on_success():
"""exec_iteration returns True when no error on step method."""
rd = RetryDecorator({'max': 3})
context = Context({})
mock = MagicMock()
assert rd.exec_iteration(2, context, mock, 3)
# context endures
assert context['retryCounter'] == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
assert rd.retry_counter == 2
def test_retry_exec_iteration_returns_true_on_max_success():
"""exec_iteration returns True when no error on step method on max."""
rd = RetryDecorator({'max': 3})
context = Context({})
mock = MagicMock()
assert rd.exec_iteration(3, context, mock, 3)
# context endures
assert context['retryCounter'] == 3
assert rd.retry_counter == 3
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 3})
def test_retry_exec_iteration_returns_false_on_error():
"""exec_iteration returns True when no error on step method."""
rd = RetryDecorator({'max': 3})
context = Context({})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
assert not rd.exec_iteration(2, context, mock, 3)
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with('retry: ignoring error because '
'retryCounter < max.\n'
'ValueError: arb')
def test_retry_exec_iteration_returns_false_on_error_with_retryon():
"""exec_iteration returns False when error specified in retryOn."""
rd = RetryDecorator({'max': 3,
'retryOn': ['KeyError', 'ValueError']})
context = Context({})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
assert not rd.exec_iteration(2, context, mock, 3)
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with('retry: ignoring error because '
'retryCounter < max.\n'
'ValueError: arb')
def test_retry_exec_iteration_returns_false_on_error_with_retryon_format():
"""exec_iteration returns False when error in retryOn with format."""
rd = RetryDecorator({'max': 3,
'retryOn': ['KeyError', '{k1}']})
context = Context({'k1': 'ValueError'})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
assert not rd.exec_iteration(2, context, mock, 3)
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 2
# step_method called once and only once with updated context
mock.assert_called_once_with({'k1': 'ValueError', 'retryCounter': 2})
mock_logger_error.assert_called_once_with('retry: ignoring error because '
'retryCounter < max.\n'
'ValueError: arb')
mock_logger_debug.assert_any_call('ValueError in retryOn. Retry again.')
def test_retry_exec_iteration_raises_on_error_not_in_retryon():
"""exec_iteration raises when error not in retryOn."""
rd = RetryDecorator({'max': 3,
'retryOn': ['KeyError', 'BlahError']})
context = Context({})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(ValueError) as err_info:
rd.exec_iteration(2, context, mock, 3)
assert str(err_info.value) == 'arb'
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with(
'ValueError not in retryOn. Raising error and exiting retry.')
def test_retry_exec_iteration_raises_on_error_in_stopon():
"""exec_iteration raises when error in stopOn."""
rd = RetryDecorator({'max': 3,
'stopOn': ['KeyError', 'ValueError']})
context = Context({})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(ValueError) as err_info:
rd.exec_iteration(2, context, mock, 3)
assert str(err_info.value) == 'arb'
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with(
'ValueError in stopOn. Raising error and exiting retry.')
def test_retry_exec_iteration_raises_on_error_in_stopon_format():
"""exec_iteration raises when error in stopOn with formatting."""
rd = RetryDecorator({'max': 3,
'stopOn': '{k1}'})
context = Context({'k1': ['KeyError', 'ValueError']})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(ValueError) as err_info:
rd.exec_iteration(2, context, mock, 3)
assert str(err_info.value) == 'arb'
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 2
# step_method called once and only once with updated context
mock.assert_called_once_with({'k1': ['KeyError', 'ValueError'],
'retryCounter': 2})
mock_logger_error.assert_called_once_with(
'ValueError in stopOn. Raising error and exiting retry.')
def test_retry_exec_iteration_returns_false_on_error_not_in_stopon():
"""exec_iteration returns False when error specified in stopOn."""
rd = RetryDecorator({'max': 3,
'stopOn': ['KeyError', 'ArbError']})
context = Context({})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
assert not rd.exec_iteration(2, context, mock, 3)
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with('retry: ignoring error because '
'retryCounter < max.\n'
'ValueError: arb')
def test_retry_exec_iteration_returns_false_on_error_not_in_stopon_format():
"""exec_iteration returns False when error specified in stopOn."""
rd = RetryDecorator({'max': 3,
'stopOn': '{k1}'})
context = Context({'k1': ['KeyError', 'ArbError']})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
assert not rd.exec_iteration(2, context, mock, 3)
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 2
# step_method called once and only once with updated context
mock.assert_called_once_with({'k1': ['KeyError', 'ArbError'],
'retryCounter': 2})
mock_logger_error.assert_called_once_with('retry: ignoring error because '
'retryCounter < max.\n'
'ValueError: arb')
mock_logger_debug.assert_any_call('ValueError not in stopOn. Continue.')
def test_retry_exec_iteration_raises_on_error_in_stopon_with_retryon():
"""exec_iteration stopOn supersedes retryOn."""
rd = RetryDecorator({'max': 3,
'stopOn': ['KeyError', 'ValueError'],
'retryOn': ['KeyError', 'ValueError']})
context = Context({})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(ValueError) as err_info:
rd.exec_iteration(2, context, mock, 3)
assert str(err_info.value) == 'arb'
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with(
'ValueError in stopOn. Raising error and exiting retry.')
def test_retry_exec_iteration_raises_on_max_exhaust():
"""exec_iteration raises error if counter is max."""
rd = RetryDecorator({'max': 3})
context = Context({})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
with pytest.raises(ValueError) as err_info:
rd.exec_iteration(3, context, mock, 3)
assert str(err_info.value) == 'arb'
# context endures
assert context['retryCounter'] == 3
assert rd.retry_counter == 3
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 3})
mock_logger_debug.assert_called_with('retry: max 3 retries '
'exhausted. raising error.')
def test_retry_exec_iteration_raises_on_max_exhaust_with_retryon():
"""exec_iteration raises error if counter is max and supersedes retryOn."""
rd = RetryDecorator({'max': 3,
'retryOn': ['KeyError', 'ValueError']})
context = Context({})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
with pytest.raises(ValueError) as err_info:
rd.exec_iteration(3, context, mock, 3)
assert str(err_info.value) == 'arb'
# context endures
assert context['retryCounter'] == 3
assert rd.retry_counter == 3
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 3})
mock_logger_debug.assert_called_with('retry: max 3 retries '
'exhausted. raising error.')
def test_retry_exec_iteration_handlederror():
"""Use inner exception when error type is HandledError."""
rd = RetryDecorator({'max': 3,
'stopOn': ['KeyError', 'ArbError']})
context = Context({})
mock = MagicMock()
err = HandledError()
err.__cause__ = ValueError('arb')
mock.side_effect = err
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
assert not rd.exec_iteration(2, context, mock, 3)
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with('retry: ignoring error because '
'retryCounter < max.\n'
'ValueError: arb')
def test_retry_exec_iteration_handlederror_with_stopon():
"""exec_iteration evals inner error against stopon list."""
rd = RetryDecorator({'max': 3,
'stopOn': '{k1}'})
context = Context({'k1': ['KeyError', 'ArbError']})
mock = MagicMock()
err = HandledError()
err.__cause__ = ValueError('arb')
mock.side_effect = err
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
assert not rd.exec_iteration(2, context, mock, 3)
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 2
# step_method called once and only once with updated context
mock.assert_called_once_with({'k1': ['KeyError', 'ArbError'],
'retryCounter': 2})
mock_logger_error.assert_called_once_with('retry: ignoring error because '
'retryCounter < max.\n'
'ValueError: arb')
mock_logger_debug.assert_any_call('ValueError not in stopOn. Continue.')
def test_retry_exec_iteration_handlederror_stopon_raises():
"""exec_iteration raises HandledError on stopOn."""
rd = RetryDecorator({'max': 3,
'stopOn': ['ValueError']})
context = Context({})
mock = MagicMock()
err = HandledError()
err.__cause__ = ValueError('arb')
mock.side_effect = err
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(HandledError) as err_info:
rd.exec_iteration(2, context, mock, 3)
assert isinstance(err_info.value.__cause__, ValueError)
assert str(err_info.value.__cause__) == 'arb'
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with(
'ValueError in stopOn. Raising error and exiting retry.')
def test_retry_exec_iteration_handlederror_retryon_raises():
"""exec_iteration raises HandledError on retryOn."""
rd = RetryDecorator({'max': 3,
'retryOn': ['KeyError', 'BlahError']})
context = Context({})
mock = MagicMock()
err = HandledError()
err.__cause__ = ValueError('arb')
mock.side_effect = err
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(HandledError) as err_info:
rd.exec_iteration(2, context, mock, 3)
assert isinstance(err_info.value.__cause__, ValueError)
assert str(err_info.value.__cause__) == 'arb'
# context endures
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'retryCounter': 2})
mock_logger_error.assert_called_once_with(
'ValueError not in retryOn. Raising error and exiting retry.')
# endregion RetryDecorator: exec_iteration
# region RetryDecorator: retry_loop
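# retry_loop behavior pinned down below: time.sleep runs between attempts
# (so it's called attempts - 1 times), a list given to the fixed backoff
# repeats its last element once exhausted, and the opening log line reports
# max (or 'indefinitely'), the backoff name and the starting interval.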
@patch('time.sleep')
def test_retry_loop_max_end_on_error(mock_time_sleep):
"""Retry loops until max and ends with error at end."""
rd = RetryDecorator({'max': 3})
context = Context({'k1': 'v1'})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with pytest.raises(ValueError) as err_info:
rd.retry_loop(context, mock)
assert str(err_info.value) == 'arb'
assert context['retryCounter'] == 3
assert rd.retry_counter == 3
assert mock.call_count == 3
mock.assert_called_with({'k1': 'v1', 'retryCounter': 3})
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(0)
assert mock_logger_info.mock_calls == [
call('retry decorator will try 3 times with fixed backoff starting at '
'0s intervals.'),
call('retry: running step with counter 1'),
call('retry: running step with counter 2'),
call('retry: running step with counter 3')]
@patch('time.sleep')
def test_retry_loop_max_end_on_error_substitution(mock_time_sleep):
"""Retry loops with substitution until max and ends with error at end."""
rd = RetryDecorator({'max': PyString('3')})
context = Context({'k1': 'v1'})
mock = MagicMock()
mock.side_effect = ValueError('arb')
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with pytest.raises(ValueError) as err_info:
rd.retry_loop(context, mock)
assert str(err_info.value) == 'arb'
assert context['retryCounter'] == 3
assert rd.retry_counter == 3
assert mock.call_count == 3
mock.assert_called_with({'k1': 'v1', 'retryCounter': 3})
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(0)
assert mock_logger_info.mock_calls == [
call('retry decorator will try 3 times with fixed backoff starting '
'at 0s intervals.'),
call('retry: running step with counter 1'),
call('retry: running step with counter 2'),
call('retry: running step with counter 3')]
@patch('time.sleep')
def test_retry_loop_max_continue_on_success(mock_time_sleep):
"""Retry loops breaks out of loop on success."""
rd = RetryDecorator({'max': 3, 'sleep': 10.1})
context = Context({'k1': 'v1'})
mock = MagicMock()
mock.side_effect = [ValueError('arb'), None]
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
rd.retry_loop(context, mock)
assert context['retryCounter'] == 2
assert rd.retry_counter == 2
assert mock.call_count == 2
mock.assert_called_with({'k1': 'v1', 'retryCounter': 2})
assert mock_time_sleep.call_count == 1
mock_time_sleep.assert_called_with(10.1)
mock_logger_debug.assert_any_call(
'retry loop complete, reporting success.')
assert mock_logger_info.mock_calls == [
call('retry decorator will try 3 times with fixed backoff starting at '
'10.1s intervals.'),
call('retry: running step with counter 1'),
call('retry: running step with counter 2')]
@patch('time.sleep')
def test_retry_loop_max_continue_on_success_fixed_list(mock_time_sleep):
"""Retry loops breaks out of loop on success with list input to fixed."""
rd = RetryDecorator({'max': 5, 'sleep': [10.1, 10.2]})
context = Context({'k1': 'v1'})
mock = MagicMock()
mock.side_effect = [ValueError('arb'),
ValueError('arb'),
ValueError('arb'),
None]
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
rd.retry_loop(context, mock)
assert context['retryCounter'] == 4
assert rd.retry_counter == 4
assert mock.call_count == 4
mock.assert_called_with({'k1': 'v1', 'retryCounter': 4})
assert mock_time_sleep.call_count == 3
    # list cycles over last element
    assert mock_time_sleep.mock_calls == [call(10.1), call(10.2),
                                          call(10.2)]
mock_logger_debug.assert_any_call(
'retry loop complete, reporting success.')
assert mock_logger_info.mock_calls == [
call('retry decorator will try 5 times with fixed backoff starting at '
'[10.1, 10.2]s intervals.'),
call('retry: running step with counter 1'),
call('retry: running step with counter 2'),
call('retry: running step with counter 3'),
call('retry: running step with counter 4')]
@patch('time.sleep')
def test_retry_loop_indefinite_continue_on_success(mock_time_sleep):
"""Retry loops breaks out of indefinite loop on success."""
rd = RetryDecorator({'sleep': 10.1})
context = Context({'k1': 'v1'})
mock = MagicMock()
mock.side_effect = [ValueError('arb1'), ValueError('arb2'), None]
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
rd.retry_loop(context, mock)
assert context['retryCounter'] == 3
assert rd.retry_counter == 3
assert mock.call_count == 3
mock.assert_called_with({'k1': 'v1', 'retryCounter': 3})
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(10.1)
assert mock_logger_info.mock_calls == [
call('retry decorator will try indefinitely with fixed backoff '
'starting at 10.1s intervals.'),
call('retry: running step with counter 1'),
call('retry: running step with counter 2'),
call('retry: running step with counter 3')]
@patch('time.sleep')
def test_retry_all_substitutions(mock_time_sleep):
"""Retry loop runs every param substituted."""
rd = RetryDecorator({'max': '{k3[1][k031]}',
'sleep': '{k2}'})
context = Context({'k1': False,
'k2': 0.3,
'k3': [
0,
{'k031': 1, 'k032': False}
]})
step_count = 0
def mock_step(context):
nonlocal step_count
step_count += 1
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
rd.retry_loop(context, mock_step)
assert context['retryCounter'] == 1
assert rd.retry_counter == 1
assert step_count == 1
assert mock_time_sleep.call_count == 0
assert mock_logger_info.mock_calls == [
call('retry decorator will try 1 times with fixed backoff starting at '
'0.3s intervals.'),
call('retry: running step with counter 1')]
@patch('pypyr.retries.random.uniform', side_effect=[11, 12, 13])
@patch('time.sleep')
def test_retry_all_substitutions_backoff(mock_sleep, mock_random):
"""Retry loop runs every param substituted with non-default backoff."""
rd = RetryDecorator({'max': '{k3[1][k031]}',
'sleep': '{k2}',
'backoff': '{k6}',
'jrc': '{k4}',
'sleepMax': '{k5}',
'backoffArgs': {'base': '{k7}', 'arb': '{k8}'}})
context = Context({'k1': False,
'k2': 3,
'k3': [
0,
{'k031': 4, 'k032': False}
],
'k4': 0.5,
'k5': 30,
'k6': 'exponentialjitter',
'k7': 3,
'k8': 'a value',
'step_count': 0})
def mock_step(context):
context['step_count'] += 1
if context['step_count'] != 4:
raise ValueError()
rd.retry_loop(context, mock_step)
assert context['retryCounter'] == 4
assert rd.retry_counter == 4
assert context['step_count'] == 4
assert mock_sleep.mock_calls == [call(11), call(12), call(13)]
assert mock_random.mock_calls == [call(4.5, 9),
call(13.5, 27),
call(15, 30)]
@patch('pypyr.retries.random.uniform', side_effect=[11, 12, 13])
@patch('time.sleep')
def test_retry_all_substitutions_backoff_jitter_list(mock_sleep, mock_random):
"""Retry loop runs fixed jitter with list."""
rd = RetryDecorator({'max': '{k3[1][k031]}',
'sleep': '{k2}',
'backoff': '{k6}',
'jrc': '{k4}',
'sleepMax': '{k5}'})
context = Context({'k1': False,
'k2': [0.3, 0.2, 0.1],
'k3': [
0,
{'k031': 4, 'k032': False}
],
'k4': 2,
'k5': 0.25,
'k6': 'jitter',
'step_count': 0})
def mock_step(context):
context['step_count'] += 1
if context['step_count'] != 4:
raise ValueError()
rd.retry_loop(context, mock_step)
assert context['retryCounter'] == 4
assert rd.retry_counter == 4
assert context['step_count'] == 4
assert mock_sleep.mock_calls == [call(11), call(12), call(13)]
assert mock_random.mock_calls == [call(0.5, 0.25),
call(0.4, 0.2),
call(0.2, 0.1)]
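# Here plain jitter is layered over a fixed sleep list: the per-retry value
# is clamped to sleepMax first, then the draw is uniform(jrc * value, value),
# so a jrc above 1 widens the window upward. Sketch under the same
# assumptions as the helpers above (not the library's actual code):
def _jitter_bounds_from_list(sleep_intervals, jrc, sleep_max, n):
    """Return the (low, high) uniform bounds for retry n over a sleep list."""
    value = min(sleep_intervals[min(n - 1, len(sleep_intervals) - 1)],
                sleep_max)
    return (jrc * value, value)

assert [_jitter_bounds_from_list([0.3, 0.2, 0.1], 2, 0.25, n)
        for n in (1, 2, 3)] == [(0.5, 0.25), (0.4, 0.2), (0.2, 0.1)]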
# endregion RetryDecorator: retry_loop
# endregion RetryDecorator
# region WhileDecorator
# region WhileDecorator: init
def test_while_init_defaults_stop():
"""The WhileDecorator ctor sets defaults with only stop set."""
wd = WhileDecorator({'stop': 'arb'})
assert wd.stop == 'arb'
assert wd.sleep == 0
assert wd.max is None
assert not wd.error_on_max
assert wd.while_counter is None
def test_while_init_defaults_max():
"""The WhileDecorator ctor sets defaults with only max set."""
wd = WhileDecorator({'max': 3})
assert wd.stop is None
assert wd.sleep == 0
assert wd.max == 3
assert not wd.error_on_max
assert wd.while_counter is None
def test_while_init_all_attributes():
"""The WhileDecorator ctor with all props set."""
wd = WhileDecorator(
{'errorOnMax': True, 'max': 3, 'sleep': 4.4, 'stop': '5'})
assert wd.stop == '5'
assert wd.sleep == 4.4
assert wd.max == 3
assert wd.error_on_max
assert wd.while_counter is None
def test_while_init_not_a_dict():
"""The WhileDecorator raises PipelineDefinitionError on bad ctor input."""
with pytest.raises(PipelineDefinitionError) as err_info:
WhileDecorator('arb')
assert str(err_info.value) == (
"while decorator must be a dict (i.e a map) type.")
def test_while_init_no_max_no_stop():
"""The WhileDecorator raises PipelineDefinitionError no max and no stop."""
with pytest.raises(PipelineDefinitionError) as err_info:
WhileDecorator({'arb': 'arbv'})
assert str(err_info.value) == (
"the while decorator must have either max or "
"stop, or both. But not neither. Note that setting stop: False with "
"no max is an infinite loop. If an infinite loop is really what you "
"want, set stop: False")
# endregion WhileDecorator: init
# region WhileDecorator: exec_iteration
def test_while_exec_iteration_no_stop():
"""exec_iteration returns False when no stop condition given."""
wd = WhileDecorator({'max': 3})
context = Context({})
mock = MagicMock()
assert not wd.exec_iteration(2, context, mock)
# context endures
assert context['whileCounter'] == 2
assert wd.while_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'whileCounter': 2})
def test_while_exec_iteration_stop_true():
"""exec_iteration returns True when stop is bool True."""
wd = WhileDecorator({'stop': True})
context = Context({})
mock = MagicMock()
assert wd.exec_iteration(2, context, mock)
# context endures
assert context['whileCounter'] == 2
assert wd.while_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'whileCounter': 2})
def test_while_exec_iteration_stop_evals_true():
"""exec_iteration True when stop evals True from formatting expr."""
wd = WhileDecorator({'stop': '{stop}'})
context = Context({'stop': True})
mock = MagicMock()
assert wd.exec_iteration(2, context, mock)
# context endures
assert context['whileCounter'] == 2
assert wd.while_counter == 2
assert len(context) == 2
# step_method called once and only once with updated context
mock.assert_called_once_with({'stop': True, 'whileCounter': 2})
def test_while_exec_iteration_stop_false():
"""exec_iteration False when stop is False."""
wd = WhileDecorator({'max': 1, 'stop': False})
context = Context()
mock = MagicMock()
assert not wd.exec_iteration(2, context, mock)
# context endures
assert context['whileCounter'] == 2
assert wd.while_counter == 2
assert len(context) == 1
# step_method called once and only once with updated context
mock.assert_called_once_with({'whileCounter': 2})
def test_while_exec_iteration_stop_evals_false():
"""exec_iteration False when stop is False."""
wd = WhileDecorator({'stop': '{stop}'})
context = Context({'stop': False})
mock = MagicMock()
assert not wd.exec_iteration(2, context, mock)
# context endures
assert context['whileCounter'] == 2
assert wd.while_counter == 2
assert len(context) == 2
# step_method called once and only once with updated context
mock.assert_called_once_with({'stop': False, 'whileCounter': 2})
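# Taken together, these tests pin down the exec_iteration contract: write the
# 1-based counter to context['whileCounter'], run the step exactly once with
# that context, and return the truthiness of the stop condition. A minimal
# restatement of that contract (the stop argument is assumed pre-resolved
# here; real pypyr formats expressions like '{stop}' against the context):
def _exec_iteration_contract(counter, context, step_method, stop=None):
    context['whileCounter'] = counter
    step_method(context)
    return bool(stop) if stop is not None else False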
# endregion WhileDecorator: exec_iteration
# region WhileDecorator: while_loop
def test_while_loop_stop_true():
"""Stop True runs loop once because it only evals after 1st iteration."""
wd = WhileDecorator({'stop': True})
mock = MagicMock()
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(Context(), mock)
mock.assert_called_once()
assert mock_logger_info.mock_calls == [
call('while decorator will loop until True evaluates to True '
'at 0.0s intervals.'),
call('while: running step with counter 1'),
call('while loop done, stop condition True evaluated True.')]
assert wd.while_counter == 1
def test_while_loop_max_0():
"""Max 0 doesn't run even once."""
wd = WhileDecorator({'max': 0})
mock = MagicMock()
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(Context(), mock)
mock.assert_not_called()
assert mock_logger_info.mock_calls == [
call('max 0 is 0. while only runs when max > 0.')]
assert wd.while_counter == 0
def test_while_loop_max_0_with_formatting():
"""Max 0 doesn't run even once with formatting expression."""
wd = WhileDecorator({'max': '{x}'})
mock = MagicMock()
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(Context({'x': -3}), mock)
mock.assert_not_called()
assert mock_logger_info.mock_calls == [
call('max {x} is -3. while only runs when max > 0.')]
assert wd.while_counter == 0
def test_while_loop_stop_evals_true():
"""Stop evaluates True from formatting expr runs once."""
wd = WhileDecorator({'stop': '{thisistrue}'})
mock = MagicMock()
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(Context({'thisistrue': True}), mock)
mock.assert_called_once()
assert wd.while_counter == 1
assert mock_logger_info.mock_calls == [
call('while decorator will loop until {thisistrue} evaluates to True '
'at 0.0s intervals.'),
call('while: running step with counter 1'),
call('while loop done, stop condition {thisistrue} evaluated True.')]
def test_while_loop_no_stop_no_max():
"""No stop, no max should raise error."""
wd = WhileDecorator({'stop': True})
wd.max = None
wd.stop = None
mock = MagicMock()
with pytest.raises(PipelineDefinitionError) as err_info:
wd.while_loop(Context(), mock)
mock.assert_not_called()
assert str(err_info.value) == (
"the while decorator must have either max or "
"stop, or both. But not neither.")
@patch('time.sleep')
def test_while_loop_max_no_stop(mock_time_sleep):
"""While loop runs with max but no stop."""
wd = WhileDecorator({'max': 3})
context = Context({'k1': 'v1'})
mock = MagicMock()
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(context, mock)
assert context['whileCounter'] == 3
assert wd.while_counter == 3
assert mock.call_count == 3
mock.assert_called_with({'k1': 'v1', 'whileCounter': 3})
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(0)
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times at 0.0s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3')]
@patch('time.sleep')
def test_while_loop_stop_no_max(mock_time_sleep):
"""While loop runs with stop but no max."""
wd = WhileDecorator({'stop': '{k1}', 'sleep': '{k2}'})
context = Context({'k1': False, 'k2': 0.3})
step_count = 0
step_context = []
def mock_step(context):
nonlocal step_count, step_context
step_count += 1
step_context.append(deepcopy(context))
if context['whileCounter'] == 3:
context['k1'] = True
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(context, mock_step)
assert context['whileCounter'] == 3
assert wd.while_counter == 3
assert step_count == 3
assert step_context == [{'k1': False, 'k2': 0.3, 'whileCounter': 1},
{'k1': False, 'k2': 0.3, 'whileCounter': 2},
{'k1': False, 'k2': 0.3, 'whileCounter': 3}]
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(0.3)
assert mock_logger_info.mock_calls == [
call('while decorator will loop until {k1} evaluates to True at 0.3s '
'intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3'),
call('while loop done, stop condition {k1} evaluated True.')]
@patch('time.sleep')
def test_while_loop_stop_and_max_stop_before_max(mock_time_sleep):
"""While loop runs with stop and max, exit before max."""
wd = WhileDecorator({'max': 5, 'stop': '{k1}', 'sleep': '{k2}'})
context = Context({'k1': False, 'k2': 0.3})
step_count = 0
step_context = []
def mock_step(context):
nonlocal step_count, step_context
step_count += 1
step_context.append(deepcopy(context))
if context['whileCounter'] == 3:
context['k1'] = True
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(context, mock_step)
assert context['whileCounter'] == 3
assert wd.while_counter == 3
assert step_count == 3
assert step_context == [{'k1': False, 'k2': 0.3, 'whileCounter': 1},
{'k1': False, 'k2': 0.3, 'whileCounter': 2},
{'k1': False, 'k2': 0.3, 'whileCounter': 3}]
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(0.3)
assert mock_logger_info.mock_calls == [
call('while decorator will loop 5 times, or until {k1} evaluates to '
'True at 0.3s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3'),
call('while loop done, stop condition {k1} evaluated True.')]
@patch('time.sleep')
def test_while_loop_stop_and_max_exhaust_max(mock_time_sleep):
"""While loop runs with stop and max, exhaust max."""
wd = WhileDecorator({'max': 3, 'stop': '{k1}', 'sleep': '{k2}'})
context = Context({'k1': False, 'k2': 0.3})
step_count = 0
step_context = []
def mock_step(context):
nonlocal step_count, step_context
step_count += 1
step_context.append(deepcopy(context))
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(context, mock_step)
assert context['whileCounter'] == 3
assert wd.while_counter == 3
assert step_count == 3
assert step_context == [{'k1': False, 'k2': 0.3, 'whileCounter': 1},
{'k1': False, 'k2': 0.3, 'whileCounter': 2},
{'k1': False, 'k2': 0.3, 'whileCounter': 3}]
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(0.3)
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times, or until {k1} evaluates to '
'True at 0.3s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3'),
call('while decorator looped 3 times, and {k1} never evaluated to '
'True.')]
@patch('time.sleep')
def test_while_loop_stop_and_max_exhaust_error(mock_time_sleep):
"""While loop runs with stop and max, exhaust max."""
wd = WhileDecorator({'max': 3,
'stop': '{k1}',
'sleep': '{k2}',
'errorOnMax': '{k3}'})
context = Context({'k1': False, 'k2': 0.3, 'k3': True})
step_count = 0
step_context = []
def mock_step(context):
nonlocal step_count, step_context
step_count += 1
step_context.append(deepcopy(context))
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(LoopMaxExhaustedError) as err_info:
wd.while_loop(context, mock_step)
assert str(err_info.value) == (
"while loop reached 3 and {k1} never evaluated to True.")
assert context['whileCounter'] == 3
assert wd.while_counter == 3
assert step_count == 3
assert step_context == [{'k1': False,
'k2': 0.3,
'k3': True,
'whileCounter': 1},
{'k1': False,
'k2': 0.3,
'k3': True,
'whileCounter': 2},
{'k1': False,
'k2': 0.3,
'k3': True,
'whileCounter': 3}]
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(0.3)
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times, or until {k1} evaluates to '
'True at 0.3s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3')]
assert mock_logger_error.mock_calls == [
call('exhausted 3 iterations of while loop, and errorOnMax is True.')
]
@patch('time.sleep')
def test_while_loop_max_exhaust_error(mock_time_sleep):
"""While loop runs with only max, exhaust max."""
wd = WhileDecorator({'max': 3,
'sleep': '{k2}',
'errorOnMax': True})
context = Context({'k1': False, 'k2': 0.3, 'k3': True})
step_count = 0
step_context = []
def mock_step(context):
nonlocal step_count, step_context
step_count += 1
step_context.append(deepcopy(context))
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
with pytest.raises(LoopMaxExhaustedError) as err_info:
wd.while_loop(context, mock_step)
assert str(err_info.value) == "while loop reached 3."
assert context['whileCounter'] == 3
assert wd.while_counter == 3
assert step_count == 3
assert step_context == [{'k1': False,
'k2': 0.3,
'k3': True,
'whileCounter': 1},
{'k1': False,
'k2': 0.3,
'k3': True,
'whileCounter': 2},
{'k1': False,
'k2': 0.3,
'k3': True,
'whileCounter': 3}]
assert mock_time_sleep.call_count == 2
mock_time_sleep.assert_called_with(0.3)
assert mock_logger_info.mock_calls == [
call('while decorator will loop 3 times at 0.3s intervals.'),
call('while: running step with counter 1'),
call('while: running step with counter 2'),
call('while: running step with counter 3')]
assert mock_logger_error.mock_calls == [
call('exhausted 3 iterations of while loop, and errorOnMax is True.')
]
@patch('time.sleep')
def test_while_loop_all_substitutions(mock_time_sleep):
"""While loop runs every param substituted."""
wd = WhileDecorator({'max': '{k3[1][k031]}',
'stop': '{k1}',
'sleep': '{k2}',
'errorOnMax': '{k3[1][k032]}'})
context = Context({'k1': False,
'k2': 0.3,
'k3': [
0,
{'k031': 1, 'k032': False}
]})
step_count = 0
def mock_step(context):
nonlocal step_count
step_count += 1
with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
wd.while_loop(context, mock_step)
assert context['whileCounter'] == 1
assert wd.while_counter == 1
assert step_count == 1
assert mock_time_sleep.call_count == 0
assert mock_logger_info.mock_calls == [
call('while decorator will loop 1 times, or until {k1} evaluates to '
'True at 0.3s intervals.'),
call('while: running step with counter 1'),
call('while decorator looped 1 times, and {k1} never evaluated to '
'True.')]
# endregion WhileDecorator: while_loop
# endregion WhileDecorator
======================================================================
8a90e94ee26a9d59439387104b0f656edef77023 | 17,254 bytes | py | Python
cinder/tests/unit/scheduler/test_capacity_weigher.py
stars: 571 (2015-01-01T17:47:26.000Z to 2022-03-23T07:46:36.000Z), helenwalsh/cinder @ 307fccea4cc9c6496334b0fe137206cb48499bd5, ["Apache-2.0"]
issues: 37 (2015-01-22T23:27:04.000Z to 2021-02-05T16:38:48.000Z), BelieveInFuture/cinder @ fff95fa6a68a054488ee087b6e31f4f5e28209dc, ["Apache-2.0"]
forks: 841 (2015-01-04T17:17:11.000Z to 2022-03-31T12:06:51.000Z), BelieveInFuture/cinder @ fff95fa6a68a054488ee087b6e31f4f5e28209dc, ["Apache-2.0"]
======================================================================
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For Capacity Weigher."""
from datetime import datetime
from unittest import mock
import ddt
from cinder.common import constants
from cinder import context
from cinder.scheduler import weights
from cinder.tests.unit.scheduler import fakes
from cinder.tests.unit import test
from cinder.volume import volume_utils
@ddt.ddt
class CapacityWeigherTestCase(test.TestCase):
def setUp(self):
super(CapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.OrderedHostWeightHandler(
'cinder.scheduler.weights')
def _get_weighed_hosts(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'size': 1}
return self.weight_handler.get_weighed_objects(
[weights.capacity.CapacityWeigher],
hosts,
weight_properties)
@mock.patch('cinder.db.sqlalchemy.api.service_get_all')
def _get_all_backends(self, _mock_service_get_all, disabled=False):
ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all,
disabled=disabled)
backend_states = self.host_manager.get_all_backend_states(ctxt)
_mock_service_get_all.assert_called_once_with(
ctxt,
None, # backend_match_level
topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled)
return backend_states
# If thin and thin_provisioning_support are True,
# use the following formula:
# free = (total * host_state.max_over_subscription_ratio
# - host_state.provisioned_capacity_gb
# - math.floor(total * reserved))
# Otherwise, use the following formula:
# free = free_space - math.floor(total * reserved)
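# Worked example with host2's numbers from the comments below (total=2048,
# provisioned=1748, max_over_subscription_ratio=1.5, reserved=10%) and
# host1's (free_space=total=1024, reserved=10%). This helper is purely
# illustrative; the real logic lives in the capacity weigher:
import math

def _effective_free(total, free_space, reserved, thin=False,
                    max_over_subscription_ratio=None,
                    provisioned_capacity_gb=None):
    if thin:
        return (total * max_over_subscription_ratio
                - provisioned_capacity_gb
                - math.floor(total * reserved))
    return free_space - math.floor(total * reserved)

assert _effective_free(2048, 300, 0.1, thin=True,
                       max_over_subscription_ratio=1.5,
                       provisioned_capacity_gb=1748) == 1120
assert _effective_free(1024, 1024, 0.1) == 922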
@ddt.data(
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
'winner': 'host2'},
{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
'winner': 'host1'},
{'volume_type': {'extra_specs': {}},
'winner': 'host2'},
{'volume_type': {},
'winner': 'host2'},
{'volume_type': None,
'winner': 'host2'},
)
@ddt.unpack
def test_default_of_spreading_first(self, volume_type, winner):
backend_info_list = self._get_all_backends()
# Results for the 1st test
# {'provisioning:type': 'thin'}:
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=1024-math.floor(1024*0.1)=922
# Norm=0.837837837838
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=2048*1.5-1748-math.floor(2048*0.1)=1120
# Norm=1.0
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=256-512*0=256
# Norm=0.292383292383
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=2048*1.0-2047-math.floor(2048*0.05)=-101
# Norm=0.0
# host5: free_capacity_gb=unknown free=-1
# Norm=0.0819000819001
# so, host2 should win:
weight_properties = {
'size': 1,
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(1.0, weighed_host.weight)
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
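# The Norm= figures in the comments above are plain min-max scaling of the
# computed free values: norm = (x - min) / (max - min). Re-deriving the
# first test's numbers as a sanity check (sketch only; the real
# normalization happens inside the weight handler):
def _minmax_norm(values):
    lo, hi = min(values), max(values)
    return [(v - lo) / (hi - lo) for v in values]

_norms = _minmax_norm([922, 1120, 256, -101, -1])
assert round(_norms[0], 6) == 0.837838   # host1
assert _norms[1] == 1.0                  # host2, the winner
assert _norms[3] == 0.0                  # host4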
@ddt.data(
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
'winner': 'host4'},
{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
'winner': 'host2'},
{'volume_type': {'extra_specs': {}},
'winner': 'host4'},
{'volume_type': {},
'winner': 'host4'},
{'volume_type': None,
'winner': 'host4'},
)
@ddt.unpack
def test_capacity_weight_multiplier1(self, volume_type, winner):
self.flags(capacity_weight_multiplier=-1.0)
backend_info_list = self._get_all_backends()
# Results for the 1st test
# {'provisioning:type': 'thin'}:
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=-(1024-math.floor(1024*0.1))=-922
# Norm=-0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=-(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=unknown free=-float('inf')
# Norm=-1.0
# so, host4 should win:
weight_properties = {
'size': 1,
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
backend_info_list,
weight_properties=weight_properties)
weighed_host = weighed_host[0]
self.assertEqual(0.0, weighed_host.weight)
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
@ddt.data(
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
'winner': 'host2'},
{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
'winner': 'host1'},
{'volume_type': {'extra_specs': {}},
'winner': 'host2'},
{'volume_type': {},
'winner': 'host2'},
{'volume_type': None,
'winner': 'host2'},
)
@ddt.unpack
def test_capacity_weight_multiplier2(self, volume_type, winner):
self.flags(capacity_weight_multiplier=2.0)
backend_info_list = self._get_all_backends()
# Results for the 1st test
# {'provisioning:type': 'thin'}:
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))*2=1844
# Norm=1.67567567568
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
# Norm=2.0
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)*2=512
# Norm=0.584766584767
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
# Norm=0.0
# host5: free_capacity_gb=unknown free=-2
# Norm=0.1638001638
# so, host2 should win:
weight_properties = {
'size': 1,
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(1.0 * 2, weighed_host.weight)
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
def test_capacity_weight_no_unknown_or_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
del self.host_manager.service_states['host5']
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm=-0.837837837838
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-1.0
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.292383292383
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host2 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host2',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_free_unknown(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 3000,
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=unknown free=3000
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_cap_unknown(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 'unknown',
'free_capacity_gb': 3000,
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=3000 free=unknown
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_free_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 3000,
'free_capacity_gb': 'infinite',
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=infinite free=3000
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_cap_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
self.host_manager.service_states['host5'] = {
'total_capacity_gb': 'infinite',
'free_capacity_gb': 3000,
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))=-922
# Norm= -0.00829542413701
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
# Norm=-0.00990099009901
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)=-256
# Norm=-0.002894884083
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))=101
# Norm=0.0
# host5: free_capacity_gb=3000 free=infinite
# Norm=-1.0
# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
======================================================================
8ab2a3bb0b0c259dbbd1fc7999241244a9e4e859 | 115 bytes | py | Python
cryptoapi/base/__init__.py
stars: 9 (2020-08-07T04:12:45.000Z to 2022-03-15T03:28:43.000Z), edwinschrubb/cryptoapi @ 0b4351560c4d55a3f38847f94f82c0a34afe87bc, ["MIT"]
issues: null
forks: 4 (2020-08-07T08:48:22.000Z to 2021-12-23T05:18:24.000Z), edwinschrubb/cryptoapi @ 0b4351560c4d55a3f38847f94f82c0a34afe87bc, ["MIT"]
======================================================================
from cryptoapi.base import errors
from cryptoapi.base import exchange
__all__ = exchange.__all__ + errors.__all__
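# Because __all__ concatenates the two submodules' export lists, a wildcard
# import of cryptoapi.base exposes both APIs. A quick invariant check
# (assumes only that both submodules define __all__, as imported above):
assert set(errors.__all__).issubset(__all__)
assert set(exchange.__all__).issubset(__all__)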
======================================================================
8ac8e5ffd478d31fd150c66546b4818f12f3630d | 95 bytes | py | Python
lp3thw/ex3c.py
stars: 1 (2017-05-01T10:13:02.000Z), Herne/pythonplayground @ c321ebbfe0480b36df077425ba2756adac480ac9, ["MIT"]
issues: null
forks: null
======================================================================
# Calculate credit card bill
print ("April 28 Credit Card Bill")
print (100 + 200 + 300 + 400)
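# The same bill expressed as a sum over itemized charges, for comparison
# (an illustrative variant, not part of the original exercise):
charges = [100, 200, 300, 400]
print("Total:", sum(charges))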
======================================================================
8acff3701fc0760dc146879d59281c6b197d0e26 | 8,391 bytes | py | Python
isi_sdk_8_2_2/isi_sdk_8_2_2/api/remotesupport_api.py
stars: 24 (2018-06-22T14:13:23.000Z to 2022-03-23T01:21:26.000Z), mohitjain97/isilon_sdk_python @ a371f438f542568edb8cda35e929e6b300b1177c, ["Unlicense"]
issues: 46 (2018-04-30T13:28:22.000Z to 2022-03-21T21:11:07.000Z), mohitjain97/isilon_sdk_python @ a371f438f542568edb8cda35e929e6b300b1177c, ["Unlicense"]
forks: 29 (2018-06-19T00:14:04.000Z to 2022-02-08T17:51:19.000Z), mohitjain97/isilon_sdk_python @ a371f438f542568edb8cda35e929e6b300b1177c, ["Unlicense"]
======================================================================
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_8_2_2.api_client import ApiClient
class RemotesupportApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_remotesupport_connectemc(self, **kwargs): # noqa: E501
"""get_remotesupport_connectemc # noqa: E501
List all settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_remotesupport_connectemc(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: RemotesupportConnectemc
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_remotesupport_connectemc_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_remotesupport_connectemc_with_http_info(**kwargs) # noqa: E501
return data
def get_remotesupport_connectemc_with_http_info(self, **kwargs): # noqa: E501
"""get_remotesupport_connectemc # noqa: E501
List all settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_remotesupport_connectemc_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: RemotesupportConnectemc
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_remotesupport_connectemc" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/remotesupport/connectemc', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RemotesupportConnectemc', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
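# Hedged usage sketch following the docstrings above (client wiring, host
# and credentials are assumed to be configured elsewhere):
#
#     api = RemotesupportApi()                        # default ApiClient()
#     settings = api.get_remotesupport_connectemc()   # synchronous call
#     thread = api.get_remotesupport_connectemc(async_req=True)
#     settings = thread.get()                         # asynchronous variant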
def update_remotesupport_connectemc(self, remotesupport_connectemc, **kwargs): # noqa: E501
"""update_remotesupport_connectemc # noqa: E501
Modify one or more settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_remotesupport_connectemc(remotesupport_connectemc, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RemotesupportConnectemcConnectemc remotesupport_connectemc: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_remotesupport_connectemc_with_http_info(remotesupport_connectemc, **kwargs) # noqa: E501
else:
(data) = self.update_remotesupport_connectemc_with_http_info(remotesupport_connectemc, **kwargs) # noqa: E501
return data
def update_remotesupport_connectemc_with_http_info(self, remotesupport_connectemc, **kwargs): # noqa: E501
"""update_remotesupport_connectemc # noqa: E501
Modify one or more settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_remotesupport_connectemc_with_http_info(remotesupport_connectemc, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RemotesupportConnectemcConnectemc remotesupport_connectemc: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['remotesupport_connectemc'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_remotesupport_connectemc" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'remotesupport_connectemc' is set
if ('remotesupport_connectemc' not in params or
params['remotesupport_connectemc'] is None):
raise ValueError("Missing the required parameter `remotesupport_connectemc` when calling `update_remotesupport_connectemc`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'remotesupport_connectemc' in params:
body_params = params['remotesupport_connectemc']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/remotesupport/connectemc', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
======================================================================
76d1585b196750f45acd09d572152a68b84dc932 | 50,825 bytes | py | Python
ocbind/interfaces/interface/ethernet/switched_vlan/state/__init__.py
stars: 1 (2019-08-01T17:42:57.000Z), SeanCondon/onos-config-demo @ 0789d397b46fd5cda512ae7fffe35e1a4bfdfdbe, ["Apache-2.0"]
issues: 1 (2021-05-26T16:38:04.000Z), SeanCondon/onos-config-demo @ 0789d397b46fd5cda512ae7fffe35e1a4bfdfdbe, ["Apache-2.0"]
forks: 4 (2019-07-24T16:52:39.000Z to 2021-12-03T02:08:13.000Z), SeanCondon/onos-config-demo @ 0789d397b46fd5cda512ae7fffe35e1a4bfdfdbe, ["Apache-2.0"]
======================================================================
# -*- coding: utf-8 -*-
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/ethernet/switched-vlan/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State variables for VLANs
"""
__slots__ = ('_path_helper', '_extmethods', '__interface_mode','__native_vlan','__access_vlan','__trunk_vlans',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
helper = kwargs.pop("path_helper", None)
if helper is False:
self._path_helper = False
elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
self._path_helper = helper
elif hasattr(self, "_parent"):
helper = getattr(self._parent, "_path_helper", False)
self._path_helper = helper
else:
self._path_helper = False
self._extmethods = False
self.__interface_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
self.__native_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
self.__access_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
self.__trunk_vlans = YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['interfaces', 'interface', 'ethernet', 'switched-vlan', 'state']
def _get_interface_mode(self):
"""
Getter method for interface_mode, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/interface_mode (oc-vlan-types:vlan-mode-type)
YANG Description: Set the interface to access or trunk mode for
VLANs
"""
return self.__interface_mode
def _set_interface_mode(self, v, load=False):
"""
Setter method for interface_mode, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/interface_mode (oc-vlan-types:vlan-mode-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_mode() directly.
YANG Description: Set the interface to access or trunk mode for
VLANs
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_mode must be of a type compatible with oc-vlan-types:vlan-mode-type""",
'defined-type': "oc-vlan-types:vlan-mode-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)""",
})
self.__interface_mode = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_mode(self):
self.__interface_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
def _get_native_vlan(self):
"""
Getter method for native_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/native_vlan (oc-vlan-types:vlan-id)
YANG Description: Set the native VLAN id for untagged frames arriving on
a trunk interface. Tagged frames sent on an interface
configured with a native VLAN should have their tags
stripped prior to transmission. This configuration is only
valid on a trunk interface.
"""
return self.__native_vlan
def _set_native_vlan(self, v, load=False):
"""
Setter method for native_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/native_vlan (oc-vlan-types:vlan-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_native_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_native_vlan() directly.
YANG Description: Set the native VLAN id for untagged frames arriving on
a trunk interface. Tagged frames sent on an interface
configured with a native VLAN should have their tags
stripped prior to transmission. This configuration is only
valid on a trunk interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """native_vlan must be of a type compatible with oc-vlan-types:vlan-id""",
'defined-type': "oc-vlan-types:vlan-id",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)""",
})
self.__native_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_native_vlan(self):
self.__native_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
def _get_access_vlan(self):
"""
Getter method for access_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/access_vlan (oc-vlan-types:vlan-id)
YANG Description: Assign the access vlan to the access port.
"""
return self.__access_vlan
def _set_access_vlan(self, v, load=False):
"""
Setter method for access_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/access_vlan (oc-vlan-types:vlan-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_vlan() directly.
YANG Description: Assign the access vlan to the access port.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_vlan must be of a type compatible with oc-vlan-types:vlan-id""",
'defined-type': "oc-vlan-types:vlan-id",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)""",
})
self.__access_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_access_vlan(self):
self.__access_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
def _get_trunk_vlans(self):
"""
Getter method for trunk_vlans, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/trunk_vlans (union)
YANG Description: Specify VLANs, or ranges thereof, that the interface may
carry when in trunk mode. If not specified, all VLANs are
allowed on the interface. Ranges are specified in the form
x..y, where x<y - ranges are assumed to be inclusive (such
that the VLAN range is x <= range <= y).
"""
return self.__trunk_vlans
def _set_trunk_vlans(self, v, load=False):
"""
Setter method for trunk_vlans, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/trunk_vlans (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_vlans is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_vlans() directly.
YANG Description: Specify VLANs, or ranges thereof, that the interface may
carry when in trunk mode. If not specified, all VLANs are
allowed on the interface. Ranges are specified in the form
x..y, where x<y - ranges are assumed to be inclusive (such
that the VLAN range is x <= range <= y).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trunk_vlans must be of a type compatible with union""",
'defined-type': "openconfig-vlan:union",
'generated-type': """YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)""",
})
self.__trunk_vlans = t
if hasattr(self, '_set'):
self._set()
def _unset_trunk_vlans(self):
self.__trunk_vlans = YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
interface_mode = __builtin__.property(_get_interface_mode)
native_vlan = __builtin__.property(_get_native_vlan)
access_vlan = __builtin__.property(_get_access_vlan)
trunk_vlans = __builtin__.property(_get_trunk_vlans)
_pyangbind_elements = OrderedDict([('interface_mode', interface_mode), ('native_vlan', native_vlan), ('access_vlan', access_vlan), ('trunk_vlans', trunk_vlans), ])
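# Every leaf above is generated with is_config=False, so the class exposes
# read-only properties only; the _set_* methods exist for backends to
# populate state and raise ValueError on type-incompatible input. Hedged
# illustration (values are placeholders):
#
#     s = state()
#     s._set_access_vlan(10)      # backend-style populate, vlan-id 1..4094
#     print(s.access_vlan)        # read-only property access
#     s._set_access_vlan(99999)   # raises ValueError: not a vlan-id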
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/ethernet/switched-vlan/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State variables for VLANs
"""
__slots__ = ('_path_helper', '_extmethods', '__interface_mode','__native_vlan','__access_vlan','__trunk_vlans',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
helper = kwargs.pop("path_helper", None)
if helper is False:
self._path_helper = False
elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
self._path_helper = helper
elif hasattr(self, "_parent"):
helper = getattr(self._parent, "_path_helper", False)
self._path_helper = helper
else:
self._path_helper = False
self._extmethods = False
self.__interface_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
self.__native_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
self.__access_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
self.__trunk_vlans = YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['interfaces', 'interface', 'ethernet', 'switched-vlan', 'state']
def _get_interface_mode(self):
"""
Getter method for interface_mode, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/interface_mode (oc-vlan-types:vlan-mode-type)
YANG Description: Set the interface to access or trunk mode for
VLANs
"""
return self.__interface_mode
def _set_interface_mode(self, v, load=False):
"""
Setter method for interface_mode, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/interface_mode (oc-vlan-types:vlan-mode-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_mode() directly.
YANG Description: Set the interface to access or trunk mode for
VLANs
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_mode must be of a type compatible with oc-vlan-types:vlan-mode-type""",
'defined-type': "oc-vlan-types:vlan-mode-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)""",
})
self.__interface_mode = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_mode(self):
self.__interface_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
def _get_native_vlan(self):
"""
Getter method for native_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/native_vlan (oc-vlan-types:vlan-id)
YANG Description: Set the native VLAN id for untagged frames arriving on
a trunk interface. Tagged frames sent on an interface
configured with a native VLAN should have their tags
stripped prior to transmission. This configuration is only
valid on a trunk interface.
"""
return self.__native_vlan
def _set_native_vlan(self, v, load=False):
"""
Setter method for native_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/native_vlan (oc-vlan-types:vlan-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_native_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_native_vlan() directly.
YANG Description: Set the native VLAN id for untagged frames arriving on
a trunk interface. Tagged frames sent on an interface
configured with a native VLAN should have their tags
stripped prior to transmission. This configuration is only
valid on a trunk interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """native_vlan must be of a type compatible with oc-vlan-types:vlan-id""",
'defined-type': "oc-vlan-types:vlan-id",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)""",
})
self.__native_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_native_vlan(self):
self.__native_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
def _get_access_vlan(self):
"""
Getter method for access_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/access_vlan (oc-vlan-types:vlan-id)
YANG Description: Assign the access vlan to the access port.
"""
return self.__access_vlan
def _set_access_vlan(self, v, load=False):
"""
Setter method for access_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/access_vlan (oc-vlan-types:vlan-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_vlan() directly.
YANG Description: Assign the access vlan to the access port.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_vlan must be of a type compatible with oc-vlan-types:vlan-id""",
'defined-type': "oc-vlan-types:vlan-id",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)""",
})
self.__access_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_access_vlan(self):
self.__access_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
def _get_trunk_vlans(self):
"""
Getter method for trunk_vlans, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/trunk_vlans (union)
YANG Description: Specify VLANs, or ranges thereof, that the interface may
carry when in trunk mode. If not specified, all VLANs are
allowed on the interface. Ranges are specified in the form
x..y, where x<y - ranges are assumed to be inclusive (such
that the VLAN range is x <= range <= y).
"""
return self.__trunk_vlans
def _set_trunk_vlans(self, v, load=False):
"""
Setter method for trunk_vlans, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/trunk_vlans (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_vlans is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_vlans() directly.
YANG Description: Specify VLANs, or ranges thereof, that the interface may
carry when in trunk mode. If not specified, all VLANs are
allowed on the interface. Ranges are specified in the form
x..y, where x<y - ranges are assumed to be inclusive (such
that the VLAN range is x <= range <= y).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trunk_vlans must be of a type compatible with union""",
'defined-type': "openconfig-vlan:union",
'generated-type': """YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)""",
})
self.__trunk_vlans = t
if hasattr(self, '_set'):
self._set()
def _unset_trunk_vlans(self):
self.__trunk_vlans = YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
interface_mode = __builtin__.property(_get_interface_mode)
native_vlan = __builtin__.property(_get_native_vlan)
access_vlan = __builtin__.property(_get_access_vlan)
trunk_vlans = __builtin__.property(_get_trunk_vlans)
_pyangbind_elements = OrderedDict([('interface_mode', interface_mode), ('native_vlan', native_vlan), ('access_vlan', access_vlan), ('trunk_vlans', trunk_vlans), ])
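# --- Editor's note (not part of the generated bindings): as the docstrings
# above state, config-false leaves expose no public setter; backends populate
# them by calling the private _set_* methods directly, while clients only get
# a read-only property. The toy stand-in below mimics that contract without
# importing pyangbind (the class and helper names are illustrative only).
class _ReadOnlyLeafDemo(object):
    def __init__(self):
        self.__access_vlan = None
    def _get_access_vlan(self):
        return self.__access_vlan
    def _set_access_vlan(self, v):
        # mirrors the validation-then-assign pattern of the real setters
        if not (isinstance(v, int) and 1 <= v <= 4094):
            raise ValueError("access_vlan must be a vlan-id (1..4094)")
        self.__access_vlan = v
    access_vlan = property(_get_access_vlan)  # no setter: config false

if __name__ == '__main__':
    _demo = _ReadOnlyLeafDemo()
    _demo._set_access_vlan(42)          # backend path: private setter
    assert _demo.access_vlan == 42
    try:
        _demo.access_vlan = 7           # client path: property is read-only
    except AttributeError:
        pass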
| 69.718793
| 713
| 0.716006
| 7,123
| 50,825
| 4.893865
| 0.032571
| 0.033564
| 0.043375
| 0.030982
| 0.988095
| 0.98187
| 0.98187
| 0.98187
| 0.98187
| 0.98187
| 0
| 0.024406
| 0.139813
| 50,825
| 728
| 714
| 69.81456
| 0.772936
| 0.232976
| 0
| 0.950649
| 0
| 0.054545
| 0.351202
| 0.169508
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109091
| false
| 0
| 0.041558
| 0
| 0.267532
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
76d45ea7c536b046d061963ad67ac03aa0a01be2
| 1,622
|
py
|
Python
|
REST/rest-api-sections-master/section2/8_static_class_methods.py
|
Rebell-Leader/bg
|
616a40286fe1d34db2916762c477676ed8067cdb
|
[
"Apache-2.0"
] | 2
|
2019-10-03T17:26:17.000Z
|
2021-05-09T01:00:55.000Z
|
REST/rest-api-sections-master/section2/8_static_class_methods.py
|
Rebell-Leader/bg
|
616a40286fe1d34db2916762c477676ed8067cdb
|
[
"Apache-2.0"
] | null | null | null |
REST/rest-api-sections-master/section2/8_static_class_methods.py
|
Rebell-Leader/bg
|
616a40286fe1d34db2916762c477676ed8067cdb
|
[
"Apache-2.0"
] | null | null | null |
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.marks = []
def average(self):
return sum(self.marks) / len(self.marks)
def go_to_school(self):
return "I'm going to {}".format(self.school)
anna = Student("Anna", "Oxford")
rolf = Student("Rolf", "Harvard")
print(anna.go_to_school())
print(rolf.go_to_school())
###
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.marks = []
def average(self):
return sum(self.marks) / len(self.marks)
def go_to_school(self):
return "I'm going to school"
anna = Student("Anna", "Oxford")
rolf = Student("Rolf", "Harvard")
print(anna.go_to_school())
print(rolf.go_to_school())
###
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.marks = []
def average(self):
return sum(self.marks) / len(self.marks)
@staticmethod
def go_to_school():
return "I'm going to school"
anna = Student("Anna", "Oxford")
rolf = Student("Rolf", "Harvard")
print(anna.go_to_school())
print(rolf.go_to_school())
###
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.marks = []
def average(self):
return sum(self.marks) / len(self.marks)
def friend(self, friend_name):
return Student(friend_name, self.school)
anna = Student("Anna", "Oxford")
friend = anna.friend("Greg")
print(friend.name)
print(friend.school)
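###
# --- Editor's note (not in the original lesson file): a natural next step in
# this progression is a @classmethod factory, so that subclasses produce
# friends of their own type; WorkingStudent below is an illustrative addition.
class Student:
    def __init__(self, name, school):
        self.name = name
        self.school = school
        self.marks = []
    def average(self):
        return sum(self.marks) / len(self.marks)
    @classmethod
    def friend(cls, origin, friend_name, *args):
        # cls is whatever class the method was called on, not hard-coded Student
        return cls(friend_name, origin.school, *args)

class WorkingStudent(Student):
    def __init__(self, name, school, salary):
        super().__init__(name, school)
        self.salary = salary

anna = WorkingStudent("Anna", "Oxford", 20.00)
greg = WorkingStudent.friend(anna, "Greg", 15.00)
print(greg.school)   # Oxford
print(type(greg))    # <class '__main__.WorkingStudent'>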
| 20.024691
| 52
| 0.614057
| 214
| 1,622
| 4.485981
| 0.121495
| 0.1125
| 0.09375
| 0.079167
| 0.86875
| 0.86875
| 0.832292
| 0.832292
| 0.832292
| 0.832292
| 0
| 0
| 0.240444
| 1,622
| 80
| 53
| 20.275
| 0.779221
| 0
| 0
| 0.849057
| 0
| 0
| 0.080695
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.226415
| false
| 0
| 0
| 0.150943
| 0.45283
| 0.150943
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 9
|
76d9d4438546f141b676d06ec608310735aab11f
| 55,307
|
py
|
Python
|
Rules.py
|
ShadoMagi/OoT-Randomizer
|
f9abb00a299e2f50f52fee4962047f8ab7f84975
|
[
"MIT"
] | null | null | null |
Rules.py
|
ShadoMagi/OoT-Randomizer
|
f9abb00a299e2f50f52fee4962047f8ab7f84975
|
[
"MIT"
] | null | null | null |
Rules.py
|
ShadoMagi/OoT-Randomizer
|
f9abb00a299e2f50f52fee4962047f8ab7f84975
|
[
"MIT"
] | null | null | null |
import collections
import logging
def set_rules(world):
global_rules(world)
if world.bridge == 'medallions':
# require all medallions to form the bridge
set_rule(world.get_entrance('Rainbow Bridge'), lambda state: state.has('Forest Medallion') and state.has('Fire Medallion') and state.has('Water Medallion') and state.has('Shadow Medallion') and state.has('Spirit Medallion') and state.has('Light Medallion'))
elif world.bridge == 'vanilla':
# require only what vanilla did to form the bridge
set_rule(world.get_entrance('Rainbow Bridge'), lambda state: state.has('Light Arrows') and state.has('Shadow Medallion') and state.has('Spirit Medallion'))
elif world.bridge == 'dungeons':
# require all medallions and stones to form the bridge
set_rule(world.get_entrance('Rainbow Bridge'), lambda state: state.has('Forest Medallion') and state.has('Fire Medallion') and state.has('Water Medallion') and state.has('Shadow Medallion') and state.has('Spirit Medallion') and state.has('Light Medallion') and state.has('Kokiri Emerald') and state.has('Goron Ruby') and state.has('Zora Sapphire'))
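# --- Editor's note (not part of Rules.py): each bridge rule is just a
# predicate over a state object exposing has(); the minimal stub below
# (illustrative, not the randomizer's real state class) shows the 'vanilla'
# requirement evaluated directly.
if __name__ == '__main__':
    class _StubState(object):
        def __init__(self, items):
            self._items = set(items)
        def has(self, item, count=1):
            return item in self._items
    _vanilla = lambda state: (state.has('Light Arrows')
                              and state.has('Shadow Medallion')
                              and state.has('Spirit Medallion'))
    assert _vanilla(_StubState(['Light Arrows', 'Shadow Medallion', 'Spirit Medallion']))
    assert not _vanilla(_StubState(['Light Arrows']))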
def set_rule(spot, rule):
spot.access_rule = rule
def set_always_allow(spot, rule):
spot.always_allow = rule
def add_rule(spot, rule, combine='and'):
old_rule = spot.access_rule
if combine == 'or':
spot.access_rule = lambda state: rule(state) or old_rule(state)
else:
spot.access_rule = lambda state: rule(state) and old_rule(state)
def forbid_item(location, item):
old_rule = location.item_rule
location.item_rule = lambda i: i.name != item and old_rule(i)
def item_in_locations(state, item, locations):
for location in locations:
if item_name(state, location) == item:
return True
return False
def item_name(state, location):
location = state.world.get_location(location)
if location.item is None:
return None
return location.item.name
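# --- Editor's note (not part of Rules.py): add_rule above composes access
# rules by closing over the previous lambda, ANDing by default or ORing when
# asked. A self-contained check with a stub spot (an illustrative stand-in):
if __name__ == '__main__':
    class _StubSpot(object):
        def __init__(self):
            self.access_rule = lambda state: True
    _spot = _StubSpot()
    add_rule(_spot, lambda state: 'Bow' in state)            # AND with the default
    add_rule(_spot, lambda state: 'Hammer' in state, 'or')   # OR with the combined rule
    assert _spot.access_rule({'Bow'})
    assert _spot.access_rule({'Hammer'})
    assert not _spot.access_rule(set())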
def global_rules(world):
# ganon can only carry triforce
world.get_location('Ganon').item_rule = lambda item: item.name == 'Triforce'
# these are default save&quit points and always accessible
world.get_region('Links House').can_reach = lambda state: True
# overworld requirements
set_rule(world.get_entrance('Deku Tree'), lambda state: state.has('Kokiri Sword') or world.open_forest)
set_rule(world.get_entrance('Lost Woods Bridge'), lambda state: world.open_forest or (state.has('Slingshot') and state.has('Kokiri Sword')))
set_rule(world.get_entrance('Deku Tree Basement Path'), lambda state: state.has('Slingshot'))
set_rule(world.get_location('Skull Kid'), lambda state: state.has('Sarias Song'))
set_rule(world.get_location('Ocarina Memory Game'), lambda state: state.has('Fairy Ocarina') or state.has('Ocarina of Time'))
set_rule(world.get_location('Target in Woods'), lambda state: state.has('Slingshot'))
set_rule(world.get_location('Deku Theater Skull Mask'), lambda state: state.has('Zeldas Letter'))
set_rule(world.get_location('Deku Theater Mask of Truth'), lambda state: state.has('Zeldas Letter') and state.has('Sarias Song') and state.has('Kokiri Emerald') and state.has('Goron Ruby') and state.has('Zora Sapphire') and state.guarantee_hint()) #Must befriend Skull Kid to sell Skull Mask, all stones to spawn running man.
set_rule(world.get_location('Anju as Adult'), lambda state: state.is_adult())
set_rule(world.get_location('Man on Roof'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('Impa House Freestanding PoH'), lambda state: (state.has('Progressive Hookshot') and state.is_adult()) or state.has('Bomb Bag'))
set_rule(world.get_location('10 Gold Skulltulla Reward'), lambda state: state.has('Gold Skulltulla Token', 10))
set_rule(world.get_location('20 Gold Skulltulla Reward'), lambda state: state.has('Gold Skulltulla Token', 20))
set_rule(world.get_location('30 Gold Skulltulla Reward'), lambda state: state.has('Gold Skulltulla Token', 30) and state.guarantee_hint())
set_rule(world.get_location('40 Gold Skulltulla Reward'), lambda state: state.has('Gold Skulltulla Token', 40) and state.guarantee_hint())
set_rule(world.get_location('50 Gold Skulltulla Reward'), lambda state: state.has('Gold Skulltulla Token', 50) and state.guarantee_hint())
set_rule(world.get_location('Heart Piece Grave Chest'), lambda state: state.has('Suns Song'))
set_rule(world.get_entrance('Composer Grave'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_location('Composer Grave Chest'), lambda state: state.has_fire_source())
set_rule(world.get_entrance('Bottom of the Well'), lambda state: state.has('Song of Storms'))
set_rule(world.get_location('Bottom of the Well Front Left Hidden Wall'), lambda state: state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_location('Bottom of the Well Front Center Bombable'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Bottom of the Well Right Bottom Hidden Wall'), lambda state: state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_location('Bottom of the Well Center Large Chest'), lambda state: state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_location('Bottom of the Well Center Small Chest'), lambda state: state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_location('Bottom of the Well Back Left Bombable'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Bottom of the Well Defeat Boss'), lambda state: state.has('Zeldas Lullaby') and state.has('Kokiri Sword')) #Sword not strictly necessary but frankly being forced to do this with sticks isn't fair
set_rule(world.get_location('Bottom of the Well Invisible Chest'), lambda state: state.has('Zeldas Lullaby') and state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_location('Bottom of the Well Underwater Front Chest'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_location('Bottom of the Well Underwater Left Chest'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_location('Bottom of the Well Basement Chest'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Bottom of the Well Locked Pits'), lambda state: state.has('Small Key (Bottom of the Well)', 3) and state.has('Lens of Truth') and state.has('Magic Meter')) #These pits are really unfair.
set_rule(world.get_location('Bottom of the Well Behind Right Grate'), lambda state: state.has('Small Key (Bottom of the Well)', 3) and state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_entrance('Death Mountain Entrance'), lambda state: state.has('Zeldas Letter') or state.is_adult())
set_rule(world.get_location('Death Mountain Bombable Chest'), lambda state: state.can_blast())
set_rule(world.get_location('Biggoron'), lambda state: state.can_blast() and state.is_adult() and state.can_finish_adult_trades() and state.guarantee_hint())
set_rule(world.get_location('Goron City Leftmost Maze Chest'), lambda state: state.is_adult() and (state.has('Progressive Strength Upgrade', 2) or state.has('Hammer')))
set_rule(world.get_location('Goron City Left Maze Chest'), lambda state: state.can_blast() or (state.has('Progressive Strength Upgrade', 2) and state.is_adult()))
set_rule(world.get_location('Goron City Right Maze Chest'), lambda state: state.can_blast() or (state.has('Progressive Strength Upgrade', 2) and state.is_adult()))
set_rule(world.get_location('Rolling Goron as Child'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Goron City Pot Freestanding PoH'), lambda state: (state.has('Bomb Bag') or state.has('Progressive Strength Upgrade')) and (state.has('Zeldas Lullaby') or (state.has('Dins Fire') and state.has('Magic Meter'))))
set_rule(world.get_entrance('Darunias Chamber'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_location('Darunias Joy'), lambda state: state.has('Sarias Song'))
set_rule(world.get_entrance('Goron City from Woods'), lambda state: state.can_blast() and (world.open_forest or (state.has('Slingshot') and state.has('Kokiri Sword'))))
set_rule(world.get_entrance('Dodongos Cavern Rocks'), lambda state: state.can_blast() or state.has('Progressive Strength Upgrade') or state.is_adult())
set_rule(world.get_entrance('Dodongos Cavern Lobby'), lambda state: state.can_blast() or state.has('Progressive Strength Upgrade'))
set_rule(world.get_entrance('Dodongos Cavern Left Door'), lambda state: state.has('Bomb Bag') or state.has('Progressive Strength Upgrade') or (state.has('Dins Fire') and state.has('Magic Meter')))
set_rule(world.get_entrance('Dodongos Cavern Slingshot Target'), lambda state: state.has('Slingshot') or ((state.has('Bow') or state.has('Hover Boots')) and state.is_adult()))
set_rule(world.get_location('Dodongos Cavern End of Bridge Chest'), lambda state: state.has('Bomb Bag') or ((state.has('Bow') or state.has('Hover Boots')) and state.is_adult() and state.has('Hammer')))
set_rule(world.get_entrance('Dodongos Cavern Bomb Drop'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Song from Saria'), lambda state: state.has('Zeldas Letter'))
set_rule(world.get_entrance('Mountain Summit Fairy'), lambda state: state.can_blast())
set_rule(world.get_location('Crater Fairy Reward'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_location('Mountain Summit Fairy Reward'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_entrance('Mountain Crater Entrance'), lambda state: state.can_blast())
set_rule(world.get_entrance('Hyrule Castle Fairy'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Hyrule Castle Fairy Reward'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_entrance('Ganons Castle Grounds'), lambda state: state.is_adult())
set_rule(world.get_entrance('Ganons Castle Fairy'), lambda state: state.has('Progressive Strength Upgrade', 3))
set_rule(world.get_location('Ganons Castle Fairy Reward'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_location('Bombchu Bowling Bomb Bag'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Bombchu Bowling Piece of Heart'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Adult Shooting Gallery'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_location('10 Big Poes'), lambda state: state.has('Bow') and state.has('Epona') and state.has_bottle() and state.is_adult() and state.guarantee_hint())
set_rule(world.get_location('Treasure Chest Game'), lambda state: state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_entrance('Lost Woods Dive Warp'), lambda state: state.can_dive() and (world.open_forest or (state.has('Slingshot') and state.has('Kokiri Sword'))))
set_rule(world.get_entrance('Zora River Dive Warp'), lambda state: state.can_dive())
set_rule(world.get_entrance('Lake Hylia Dive Warp'), lambda state: state.can_dive())
set_rule(world.get_entrance('Zoras Domain Dive Warp'), lambda state: state.can_dive())
set_rule(world.get_entrance('Zora River Waterfall'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_entrance('Zora River Rocks'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Zora River Lower Freestanding PoH'), lambda state: state.has('Bomb Bag') or state.has('Progressive Scale') or (state.has('Hover Boots') and state.is_adult()))
set_rule(world.get_location('Zora River Upper Freestanding PoH'), lambda state: state.has('Bomb Bag') or state.has('Progressive Scale') or (state.has('Hover Boots') and state.is_adult()))
set_rule(world.get_location('Frog Ocarina Game'), lambda state: state.has('Zeldas Lullaby') and state.has('Sarias Song') and state.has('Suns Song') and state.has('Eponas Song') and state.has('Song of Time') and state.has('Song of Storms'))
set_rule(world.get_location('Frogs in the Rain'), lambda state: state.has('Song of Storms'))
set_rule(world.get_location('Underwater Bottle'), lambda state: state.can_dive())
set_rule(world.get_location('King Zora Moves'), lambda state: state.has('Bottle with Letter'))
set_rule(world.get_entrance('Behind King Zora'), lambda state: state.has('Bottle with Letter'))
set_rule(world.get_entrance('Zora River Adult'), lambda state: state.is_adult())
set_rule(world.get_entrance('Zoras Domain Adult Access'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_entrance('Zoras Fountain Adult Access'), lambda state: state.can_reach('Zoras Fountain'))
set_rule(world.get_entrance('Jabu Jabus Belly'), lambda state: state.has_bottle())
set_rule(world.get_entrance('Zoras Fountain Fairy'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Zoras Fountain Fairy Reward'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_entrance('Jabu Jabus Belly Ceiling Switch'), lambda state: state.has('Slingshot') or state.has('Bomb Bag') or state.has('Boomerang'))
set_rule(world.get_entrance('Jabu Jabus Belly Tentacles'), lambda state: state.has('Boomerang'))
set_rule(world.get_location('Ice Cavern Map Chest'), lambda state: state.has_bottle())
set_rule(world.get_location('Ice Cavern Compass Chest'), lambda state: state.has_bottle())
set_rule(world.get_location('Ice Cavern Freestanding PoH'), lambda state: state.has_bottle())
set_rule(world.get_location('Ice Cavern Iron Boots Chest'), lambda state: state.has_bottle())
set_rule(world.get_location('Sheik in Ice Cavern'), lambda state: state.has_bottle() and state.is_adult())
set_rule(world.get_location('Ocarina of Time'), lambda state: state.has('Kokiri Emerald') and state.has('Goron Ruby') and state.has('Zora Sapphire') and state.guarantee_hint())
set_rule(world.get_location('Song from Ocarina of Time'), lambda state: state.has('Kokiri Emerald') and state.has('Goron Ruby') and state.has('Zora Sapphire') and state.guarantee_hint())
set_rule(world.get_entrance('Door of Time'), lambda state: state.has('Song of Time') or world.open_door_of_time)
set_rule(world.get_location('Talons Chickens'), lambda state: state.has('Zeldas Letter'))
set_rule(world.get_location('Epona'), lambda state: state.has('Eponas Song') and state.is_adult())
set_rule(world.get_entrance('Adult Forest Warp Pad'), lambda state: state.has('Minuet of Forest') and state.is_adult())
set_rule(world.get_entrance('Child Forest Warp Pad'), lambda state: state.has('Minuet of Forest'))
set_rule(world.get_entrance('Adult Meadow Access'), lambda state: state.has('Sarias Song') and state.is_adult())
set_rule(world.get_entrance('Forest Temple Entrance'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_entrance('Forest Temple Song of Time Block'), lambda state: state.has('Song of Time'))
set_rule(world.get_entrance('Forest Temple Lobby Eyeball Switch'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_entrance('Forest Temple Lobby Locked Door'), lambda state: state.has('Progressive Strength Upgrade') and state.has('Small Key (Forest Temple)', 1))
set_rule(world.get_entrance('Forest Temple Well Connection'), lambda state: ((state.has('Iron Boots') or state.has('Progressive Hookshot', 2)) and state.is_adult()) or state.has('Progressive Scale', 2)) #Longshot can grab some very high up vines to drain the well.
set_rule(world.get_entrance('Forest Temple Scarecrows Song'), lambda state: False) #For some reason you can't actually activate this from below. Cool game.
set_rule(world.get_entrance('Forest Temple Elevator'), lambda state: state.has('Bow') and state.is_adult() and state.has('Progressive Strength Upgrade') and state.has('Small Key (Forest Temple)', 3))
set_rule(world.get_entrance('Forest Temple Outside Backdoor'), lambda state: state.has('Hover Boots') and state.is_adult())
set_rule(world.get_entrance('Forest Temple Twisted Hall'), lambda state: state.has('Small Key (Forest Temple)', 3))
set_rule(world.get_entrance('Forest Temple Straightened Hall'), lambda state: state.has('Small Key (Forest Temple)', 2) and state.has('Bow'))
set_rule(world.get_entrance('Forest Temple Drop to Falling Room'), lambda state: state.has('Small Key (Forest Temple)', 5) and (state.has('Bow') or (state.has('Dins Fire') and state.has('Magic Meter'))))
set_rule(world.get_location('Forest Temple Block Push Chest'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_location('Forest Temple Red Poe Chest'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_location('Forest Temple Blue Poe Chest'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_location('Phantom Ganon'), lambda state: state.has('Boss Key (Forest Temple)'))
set_rule(world.get_location('Phantom Ganon Heart'), lambda state: state.has('Boss Key (Forest Temple)'))
set_rule(world.get_entrance('Dampes Grave'), lambda state: state.is_adult())
set_rule(world.get_location('Graveyard Freestanding PoH'), lambda state: state.is_adult() and (state.has('Magic Bean') or state.has('Progressive Hookshot', 2)))
set_rule(world.get_location('Song at Windmill'), lambda state: state.is_adult())
set_rule(world.get_location('Windmill Freestanding PoH'), lambda state: (state.is_adult() and state.has('Song of Time')) or state.has('Boomerang'))
set_rule(world.get_entrance('Temple Warp Pad'), lambda state: state.has('Prelude of Light'))
set_rule(world.get_location('Sheik at Temple'), lambda state: state.has('Forest Medallion') and state.is_adult())
set_rule(world.get_location('Diving in the Lab'), lambda state: state.has('Progressive Scale', 2))
set_rule(world.get_location('Child Fishing'), lambda state: state.has('Kokiri Sword'))
set_rule(world.get_location('Adult Fishing'), lambda state: state.is_adult() and (state.has('Progressive Hookshot') or state.has('Magic Bean')))
set_rule(world.get_location('Lake Hylia Freestanding PoH'), lambda state: state.is_adult() and (state.has('Progressive Hookshot') or state.has('Magic Bean')))
set_rule(world.get_location('Lake Hylia Sun'), lambda state: state.has('Progressive Hookshot', 2) and state.has('Bow') and state.is_adult())
set_rule(world.get_entrance('Crater Hover Boots'), lambda state: state.is_adult() and state.has('Hover Boots'))
set_rule(world.get_entrance('Crater Ascent'), lambda state: state.is_adult() and state.has_GoronTunic())
set_rule(world.get_entrance('Crater Scarecrow'), lambda state: state.is_adult() and state.has('Progressive Hookshot', 2) and state.has_GoronTunic())
set_rule(world.get_entrance('Crater Bridge'), lambda state: state.is_adult() and (state.has('Hover Boots') or state.has('Progressive Hookshot')))
set_rule(world.get_entrance('Crater Bridge Reverse'), lambda state: state.is_adult() and (state.has('Hover Boots') or state.has('Progressive Hookshot')))
set_rule(world.get_entrance('Crater Warp Pad'), lambda state: state.has('Bolero of Fire'))
set_rule(world.get_entrance('Crater Fairy'), lambda state: state.is_adult() and state.has('Hammer'))
set_rule(world.get_location('DM Crater Volcano Freestanding PoH'), lambda state: state.is_adult() and state.has('Magic Bean') and state.has('Bolero of Fire'))
set_rule(world.get_entrance('Fire Temple Entrance'), lambda state: state.is_adult() and state.has_GoronTunic())
set_rule(world.get_entrance('Fire Temple Early Climb'), lambda state: state.has('Small Key (Fire Temple)', 3) and state.has('Progressive Strength Upgrade') and (state.has('Bomb Bag') or ((state.has('Bow') or state.has('Progressive Hookshot')) and state.is_adult())))
set_rule(world.get_entrance('Fire Temple Fire Maze Escape'), lambda state: state.has('Small Key (Fire Temple)', 7) or (state.has('Small Key (Fire Temple)', 6) and state.has('Hover Boots') and state.has('Hammer') and state.is_adult()))
set_rule(world.get_location('Fire Temple Fire Dancer Chest'), lambda state: state.is_adult() and state.has('Hammer'))
set_rule(world.get_location('Fire Temple Boss Key Chest'), lambda state: state.is_adult() and state.has('Hammer'))
set_rule(world.get_location('Fire Temple Big Lava Room Bombable Chest'), lambda state: state.has('Small Key (Fire Temple)', 1) and state.has('Bomb Bag'))
set_rule(world.get_location('Fire Temple Big Lava Room Open Chest'), lambda state: state.has('Small Key (Fire Temple)', 1))
set_rule(world.get_location('Fire Temple Map Chest'), lambda state: state.has('Small Key (Fire Temple)', 5) or (state.has('Small Key (Fire Temple)', 4) and state.is_adult() and state.has('Bow')))
set_rule(world.get_location('Fire Temple Boulder Maze Upper Chest'), lambda state: state.has('Small Key (Fire Temple)', 5))
set_rule(world.get_location('Fire Temple Boulder Maze Bombable Pit'), lambda state: state.has('Small Key (Fire Temple)', 5) and state.has('Bomb Bag'))
set_rule(world.get_location('Fire Temple Scarecrow Chest'), lambda state: state.has('Small Key (Fire Temple)', 5) and state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('Fire Temple Compass Chest'), lambda state: state.has('Small Key (Fire Temple)', 6))
set_rule(world.get_location('Fire Temple Highest Goron Chest'), lambda state: state.has('Song of Time') and state.has('Hammer') and state.is_adult())
set_rule(world.get_location('Fire Temple Megaton Hammer Chest'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Volvagia'), lambda state: state.has('Hammer') and state.is_adult() and state.has('Boss Key (Fire Temple)') and (state.has('Hover Boots') or (state.can_reach('Fire Temple Upper') and (state.has('Song of Time') or state.has('Bomb Bag')))))
set_rule(world.get_location('Volvagia Heart'), lambda state: state.has('Hammer') and state.is_adult() and state.has('Boss Key (Fire Temple)') and (state.has('Hover Boots') or (state.can_reach('Fire Temple Upper') and (state.has('Song of Time') or state.has('Bomb Bag')))))
set_rule(world.get_location('Sheik in Crater'), lambda state: state.is_adult())
set_rule(world.get_location('Link the Goron'), lambda state: state.is_adult() and (state.has('Progressive Strength Upgrade') or state.has('Bomb Bag') or state.has('Bow')))
set_rule(world.get_entrance('Crater Access'), lambda state: state.is_adult() and (state.has('Progressive Strength Upgrade') or state.has('Bomb Bag')))
set_rule(world.get_entrance('Lake Warp Pad'), lambda state: state.has('Serenade of Water'))
set_rule(world.get_location('King Zora Thawed'), lambda state: state.has_bottle() and (state.can_reach('Ice Cavern') or state.can_reach('Ganons Castle Water Trial') or state.has('Progressive Wallet', 2)))
set_rule(world.get_location('Zoras Fountain Bottom Freestanding PoH'), lambda state: state.has('Iron Boots'))
set_rule(world.get_entrance('Water Temple Entrance'), lambda state: state.is_adult() and (state.has('Zora Tunic') or (state.has('Progressive Wallet', 2) and state.has_bottle() and state.has('Zeldas Lullaby'))) and state.has('Iron Boots') and state.has('Progressive Hookshot'))
set_rule(world.get_entrance('Water Temple Central Pillar'), lambda state: (state.has('Bow') or (state.has('Dins Fire') and state.has('Magic Meter')) or state.has('Small Key (Water Temple)', 5)) and state.has('Zeldas Lullaby'))
set_rule(world.get_entrance('Water Temple Upper Locked Door'), lambda state: state.has('Small Key (Water Temple)', 5) and (state.has('Zeldas Lullaby') or world.keysanity))
set_rule(world.get_location('Water Temple Torches Chest'), lambda state: (state.has('Bow') or (state.has('Dins Fire') and state.has('Magic Meter'))) and state.has('Zeldas Lullaby'))
set_rule(world.get_location('Water Temple Dragon Chest'), lambda state: (state.has('Progressive Strength Upgrade') and state.has('Zeldas Lullaby')) or (state.has('Small Key (Water Temple)', 6) and (state.has('Zeldas Lullaby') or world.keysanity) and state.has('Song of Time') and state.has('Bow')))
set_rule(world.get_location('Water Temple Central Bow Target Chest'), lambda state: state.has('Bow') and state.has('Progressive Strength Upgrade') and state.has('Zeldas Lullaby') and (state.has('Hover Boots') or state.has('Progressive Hookshot', 2)))
set_always_allow(world.get_location('Water Temple Boss Key Chest'), lambda item, state: item.name == 'Small Key (Water Temple)')
set_rule(world.get_location('Water Temple Boss Key Chest'), lambda state: (state.has('Small Key (Water Temple)', 6) and (state.has('Zeldas Lullaby') or world.keysanity) and ((state.has('Bomb Bag') and state.has('Progressive Strength Upgrade')) or state.has('Hover Boots')) and state.has('Progressive Hookshot', 2)) or item_name(state, 'Water Temple Boss Key Chest') == 'Small Key (Water Temple)') #If key for key, this lets the logic reduce the small key reqs for every other locked door.
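# Key-for-key, in brief: set_always_allow registers a per-item predicate that the
# fill algorithm consults in addition to the normal access rule, so this chest may
# still receive a Water Temple small key even when the rule above fails; the
# item_name(...) clause then lets reachability count that key at a lower key total.
# A hedged illustration of the pattern (hypothetical names):
#   set_always_allow(world.get_location('Some Locked Chest'),
#                    lambda item, state: item.name == 'Small Key (Some Dungeon)')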
set_rule(world.get_location('Morpha'), lambda state: state.has('Boss Key (Water Temple)') and state.has('Progressive Hookshot', 2))
set_rule(world.get_location('Morpha Heart'), lambda state: state.has('Boss Key (Water Temple)') and state.has('Progressive Hookshot', 2))
set_rule(world.get_location('Water Temple Cracked Wall Chest'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Water Temple Dark Link Chest'), lambda state: state.has('Small Key (Water Temple)', 6) and (state.has('Zeldas Lullaby') or world.keysanity))
set_rule(world.get_location('Water Temple River Chest'), lambda state: state.has('Small Key (Water Temple)', 6) and state.has('Song of Time') and state.has('Bow') and (state.has('Zeldas Lullaby') or world.keysanity))
set_rule(world.get_location('Sheik in Kakariko'), lambda state: state.has('Forest Medallion') and state.has('Fire Medallion') and state.has('Water Medallion'))
set_rule(world.get_entrance('Graveyard Warp Pad'), lambda state: state.has('Nocturne of Shadow'))
set_rule(world.get_entrance('Shadow Temple Entrance'), lambda state: state.has('Dins Fire') and state.has('Magic Meter') and state.has('Lens of Truth') and state.is_adult() and (state.has('Hover Boots') or state.has('Progressive Hookshot')))
set_rule(world.get_entrance('Shadow Temple First Pit'), lambda state: state.has('Hover Boots'))
set_rule(world.get_entrance('Shadow Temple Bomb Wall'), lambda state: state.has('Bomb Bag') and state.has('Small Key (Shadow Temple)', 1))
set_rule(world.get_entrance('Shadow Temple Hookshot Target'), lambda state: state.has('Progressive Hookshot') and state.has('Small Key (Shadow Temple)', 3))
set_rule(world.get_entrance('Shadow Temple Boat'), lambda state: state.has('Zeldas Lullaby') and state.has('Small Key (Shadow Temple)', 4))
set_rule(world.get_location('Shadow Temple Falling Spikes Upper Chest'), lambda state: state.has('Progressive Strength Upgrade'))
set_rule(world.get_location('Shadow Temple Falling Spikes Switch Chest'), lambda state: state.has('Progressive Strength Upgrade'))
set_rule(world.get_location('Shadow Temple Invisible Spikes Chest'), lambda state: state.has('Small Key (Shadow Temple)', 2))
set_rule(world.get_location('Shadow Temple Freestanding Key'), lambda state: state.has('Small Key (Shadow Temple)', 2) and state.has('Progressive Hookshot'))
set_rule(world.get_location('Bongo Bongo'), lambda state: state.has('Small Key (Shadow Temple)', 5) and (state.has('Bow') or state.has('Progressive Hookshot', 2)) and state.has('Boss Key (Shadow Temple)'))
set_rule(world.get_location('Bongo Bongo Heart'), lambda state: state.has('Small Key (Shadow Temple)', 5) and (state.has('Bow') or state.has('Progressive Hookshot', 2)) and state.has('Boss Key (Shadow Temple)'))
set_rule(world.get_entrance('Bridge Crossing'), lambda state: (state.has('Epona') or state.has('Progressive Hookshot', 2)) and state.is_adult())
set_rule(world.get_location('Gerudo Valley Hammer Rocks Chest'), lambda state: state.has('Hammer') and state.is_adult())
set_rule(world.get_entrance('Fortress Entrance'), lambda state: (state.has('Bow') or state.has('Progressive Hookshot') or state.has('Hover Boots')) and state.is_adult())
set_rule(world.get_entrance('Gerudo Training Grounds Entrance'), lambda state: state.has('Gerudo Membership Card') and state.is_adult())
set_rule(world.get_entrance('Haunted Wasteland Entrance'), lambda state: state.has('Gerudo Membership Card') and state.is_adult() and (state.has('Hover Boots') or state.has('Progressive Hookshot', 2)))
set_rule(world.get_entrance('Haunted Wasteland Crossing'), lambda state: state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_entrance('Colossus Warp Pad'), lambda state: state.has('Requiem of Spirit'))
set_rule(world.get_entrance('Colossus Fairy'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Colossus Freestanding PoH'), lambda state: state.has('Requiem of Spirit') and state.has('Magic Bean') and state.is_adult())
set_rule(world.get_location('Desert Colossus Fairy Reward'), lambda state: state.has('Zeldas Lullaby'))
set_rule(world.get_location('Gerudo Fortress Rooftop Chest'), lambda state: (state.has('Hover Boots') or state.has('Progressive Hookshot')) and state.is_adult())
set_rule(world.get_location('Horseback Archery 1000 Points'), lambda state: state.has('Gerudo Membership Card') and state.has('Epona') and state.has('Bow') and state.is_adult())
set_rule(world.get_location('Horseback Archery 1500 Points'), lambda state: state.has('Gerudo Membership Card') and state.has('Epona') and state.has('Bow') and state.is_adult())
set_rule(world.get_location('Haunted Wasteland Structure Chest'), lambda state: state.has_fire_source())
set_rule(world.get_entrance('Gerudo Training Ground Left Silver Rupees'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_entrance('Gerudo Training Ground Beamos'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_entrance('Gerudo Training Grounds Right Locked Doors'), lambda state: state.has('Small Key (Gerudo Training Grounds)', 9))
set_rule(world.get_entrance('Gerudo Training Grounds Maze Ledge'), lambda state: state.has('Song of Time'))
set_rule(world.get_entrance('Gerudo Training Grounds Right Hookshot Target'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_entrance('Gerudo Training Grounds Hammer Target'), lambda state: state.has('Hammer') and state.has('Bow') and state.is_adult())
set_rule(world.get_entrance('Gerudo Training Grounds Hidden Hookshot Target'), lambda state: state.has('Progressive Hookshot') and state.has('Lens of Truth') and state.has('Magic Meter') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Lobby Left Chest'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Lobby Right Chest'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Beamos Chest'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('Gerudo Training Grounds Hidden Ceiling Chest'), lambda state: state.has('Small Key (Gerudo Training Grounds)', 3) and state.has('Lens of Truth') and state.has('Magic Meter'))
set_rule(world.get_location('Gerudo Training Grounds Maze Path First Chest'), lambda state: state.has('Small Key (Gerudo Training Grounds)', 4))
set_rule(world.get_location('Gerudo Training Grounds Maze Path Second Chest'), lambda state: state.has('Small Key (Gerudo Training Grounds)', 6))
set_rule(world.get_location('Gerudo Training Grounds Maze Path Third Chest'), lambda state: state.has('Small Key (Gerudo Training Grounds)', 7))
set_rule(world.get_location('Gerudo Training Grounds Maze Path Final Chest'), lambda state: (state.has('Small Key (Gerudo Training Grounds)', 9)) or (item_name(state, 'Gerudo Training Grounds Maze Path Final Chest') == 'Small Key (Gerudo Training Grounds)' and state.has('Small Key (Gerudo Training Grounds)', 8))) #Allow key for key
set_always_allow(world.get_location('Gerudo Training Grounds Maze Path Final Chest'), lambda item, state: item.name == 'Small Key (Gerudo Training Grounds)')
set_rule(world.get_location('Gerudo Training Grounds Underwater Silver Rupee Chest'), lambda state: state.has('Progressive Hookshot') and state.has('Song of Time') and state.has('Iron Boots') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Hammer Room Switch Chest'), lambda state: state.has('Hammer') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Eye Statue Chest'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Near Scarecrow Chest'), lambda state: state.has('Bow') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Heavy Block First Chest'), lambda state: state.has('Progressive Strength Upgrade', 2) and state.has('Lens of Truth') and state.has('Magic Meter') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Heavy Block Second Chest'), lambda state: state.has('Progressive Strength Upgrade', 2) and state.has('Lens of Truth') and state.has('Magic Meter') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Heavy Block Third Chest'), lambda state: state.has('Progressive Strength Upgrade', 2) and state.has('Lens of Truth') and state.has('Magic Meter') and state.is_adult())
set_rule(world.get_location('Gerudo Training Grounds Heavy Block Fourth Chest'), lambda state: state.has('Progressive Strength Upgrade', 2) and state.has('Lens of Truth') and state.has('Magic Meter') and state.is_adult())
set_rule(world.get_entrance('Spirit Temple Crawl Passage'), lambda state: state.has('Requiem of Spirit'))
set_rule(world.get_entrance('Spirit Temple Silver Block'), lambda state: state.has('Progressive Strength Upgrade', 2) and state.is_adult())
set_rule(world.get_entrance('Child Spirit Temple Passthrough'), lambda state: state.has('Bomb Bag') and state.has('Small Key (Spirit Temple)', 1))
set_rule(world.get_entrance('Adult Spirit Temple Passthrough'), lambda state: state.has('Small Key (Spirit Temple)', 1))
set_rule(world.get_entrance('Spirit Temple Central Locked Door'), lambda state: state.has('Small Key (Spirit Temple)', 4) and state.has('Progressive Strength Upgrade', 2) and state.is_adult())
set_rule(world.get_entrance('Spirit Temple Final Locked Door'), lambda state: state.has('Small Key (Spirit Temple)', 5) and (state.has('Progressive Hookshot') or state.has('Bow') or state.has('Bomb Bag')))
set_rule(world.get_location('Spirit Temple Child Left Chest'), lambda state: state.has('Boomerang') or state.has('Slingshot'))
set_rule(world.get_location('Spirit Temple Child Right Chest'), lambda state: state.has('Boomerang') or state.has('Slingshot'))
set_rule(world.get_location('Spirit Temple Compass Chest'), lambda state: state.has('Progressive Hookshot') and state.has('Zeldas Lullaby'))
set_rule(world.get_location('Spirit Temple Early Adult Right Chest'), lambda state: state.has('Bow') or state.has('Progressive Hookshot') or state.has('Bomb Bag')) #Bomb Bag option requires a very specific Bombchu use, Hover Boots can be skipped by jumping on top of the rolling rock.
set_rule(world.get_location('Spirit Temple First Mirror Right Chest'), lambda state: state.has('Small Key (Spirit Temple)', 3))
set_rule(world.get_location('Spirit Temple First Mirror Left Chest'), lambda state: state.has('Small Key (Spirit Temple)', 3))
set_rule(world.get_location('Spirit Temple Map Chest'), lambda state: (state.has('Small Key (Spirit Temple)', 5) and state.has('Requiem of Spirit')) or (state.has('Magic Meter') and (state.has('Dins Fire') or (state.has('Fire Arrows') and state.has('Bow') and state.has('Progressive Strength Upgrade', 2) and state.has('Small Key (Spirit Temple)', 3) and state.is_adult()))))
set_rule(world.get_location('Spirit Temple Child Climb East Chest'), lambda state: state.has('Bomb Bag') or ((state.has('Boomerang') or state.has('Slingshot')) and (state.has('Progressive Hookshot') or state.has('Bow'))) or (state.has('Small Key (Spirit Temple)', 3) and state.has('Progressive Strength Upgrade', 2) and state.is_adult() and (state.has('Progressive Hookshot') or state.has('Bow'))) or (state.has('Small Key (Spirit Temple)', 5) and state.has('Requiem of Spirit') and (state.has('Boomerang') or state.has('Slingshot'))))
set_rule(world.get_location('Spirit Temple Child Climb North Chest'), lambda state: state.has('Bomb Bag') or ((state.has('Boomerang') or state.has('Slingshot')) and (state.has('Progressive Hookshot') or state.has('Bow'))) or (state.has('Small Key (Spirit Temple)', 3) and state.has('Progressive Strength Upgrade', 2) and state.is_adult() and (state.has('Progressive Hookshot') or state.has('Bow'))) or (state.has('Small Key (Spirit Temple)', 5) and state.has('Requiem of Spirit') and (state.has('Boomerang') or state.has('Slingshot'))))
set_rule(world.get_location('Spirit Temple Sun Block Room Chest'), lambda state: (state.has('Small Key (Spirit Temple)', 5) and state.has('Bomb Bag') and state.has('Requiem of Spirit')) or (state.has_fire_source() and (state.has('Bomb Bag') or state.has('Small Key (Spirit Temple)', 2))))
set_rule(world.get_location('Spirit Temple Statue Hand Chest'), lambda state: state.has('Small Key (Spirit Temple)', 3) and state.has('Progressive Strength Upgrade', 2) and state.is_adult() and state.has('Zeldas Lullaby'))
set_rule(world.get_location('Spirit Temple NE Main Room Chest'), lambda state: state.has('Small Key (Spirit Temple)', 3) and state.has('Progressive Strength Upgrade', 2) and state.is_adult() and state.has('Zeldas Lullaby') and state.has('Progressive Hookshot'))
set_rule(world.get_location('Mirror Shield Chest'), lambda state: state.has('Small Key (Spirit Temple)', 4) and state.has('Progressive Strength Upgrade', 2) and state.is_adult() and state.has('Bomb Bag'))
set_rule(world.get_location('Silver Gauntlets Chest'), lambda state: (state.has('Small Key (Spirit Temple)', 3) and state.has('Progressive Hookshot', 2) and state.has('Bomb Bag')) or state.has('Small Key (Spirit Temple)', 5))
set_rule(world.get_location('Spirit Temple Near Four Armos Chest'), lambda state: state.has('Mirror Shield') and state.has('Bomb Bag'))
set_rule(world.get_location('Spirit Temple Hallway Left Invisible Chest'), lambda state: state.has('Magic Meter') and state.has('Lens of Truth') and state.has('Bomb Bag'))
set_rule(world.get_location('Spirit Temple Hallway Right Invisible Chest'), lambda state: state.has('Magic Meter') and state.has('Lens of Truth') and state.has('Bomb Bag'))
set_rule(world.get_location('Spirit Temple Boss Key Chest'), lambda state: state.has('Zeldas Lullaby') and state.has('Bow') and state.has('Progressive Hookshot') and state.can_blast())
set_rule(world.get_location('Spirit Temple Topmost Chest'), lambda state: state.has('Mirror Shield'))
set_rule(world.get_location('Twinrova'), lambda state: state.has('Mirror Shield') and state.has('Bomb Bag') and state.has('Progressive Hookshot') and state.has('Boss Key (Spirit Temple)'))
set_rule(world.get_location('Twinrova Heart'), lambda state: state.has('Mirror Shield') and state.has('Bomb Bag') and state.has('Progressive Hookshot') and state.has('Boss Key (Spirit Temple)'))
set_rule(world.get_location('Zelda'), lambda state: state.has('Shadow Medallion') and state.has('Spirit Medallion'))
set_rule(world.get_entrance('Ganons Castle Light Trial'), lambda state: state.has('Progressive Strength Upgrade', 3))
set_rule(world.get_entrance('Ganons Castle Tower'), lambda state: state.has('Forest Trial Clear') and state.has('Fire Trial Clear') and state.has('Water Trial Clear') and state.has('Shadow Trial Clear') and state.has('Spirit Trial Clear') and state.has('Light Trial Clear'))
set_rule(world.get_location('Ganons Castle Forest Trial Clear'), lambda state: state.has('Magic Meter') and state.has('Bow') and state.has('Light Arrows') and (state.has('Fire Arrows') or (state.has('Progressive Hookshot') and state.has('Dins Fire'))))
set_rule(world.get_location('Ganons Castle Fire Trial Clear'), lambda state: state.has_GoronTunic() and state.has('Progressive Strength Upgrade', 3) and state.has('Magic Meter') and state.has('Bow') and state.has('Light Arrows') and state.has('Progressive Hookshot', 2))
set_rule(world.get_location('Ganons Castle Water Trial Clear'), lambda state: state.has_bottle() and state.has('Hammer') and state.has('Magic Meter') and state.has('Bow') and state.has('Light Arrows'))
set_rule(world.get_location('Ganons Castle Shadow Trial Clear'), lambda state: state.has('Magic Meter') and state.has('Bow') and state.has('Light Arrows') and state.has('Hammer') and (state.has('Fire Arrows') or state.has('Progressive Hookshot', 2)) and (state.has('Lens of Truth') or (state.has('Hover Boots') and state.has('Progressive Hookshot', 2))))
set_rule(world.get_location('Ganons Castle Shadow Trial First Chest'), lambda state: (state.has('Magic Meter') and state.has('Bow') and state.has('Fire Arrows')) or state.has('Progressive Hookshot') or state.has('Hover Boots') or state.has('Song of Time'))
set_rule(world.get_location('Ganons Castle Shadow Trial Second Chest'), lambda state: (state.has('Magic Meter') and state.has('Bow') and state.has('Fire Arrows')) or (state.has('Progressive Hookshot', 2) and state.has('Hover Boots')))
set_rule(world.get_location('Ganons Castle Spirit Trial Clear'), lambda state: state.has('Magic Meter') and state.has('Bow') and state.has('Light Arrows') and state.has('Mirror Shield') and state.has('Bomb Bag') and state.has('Progressive Hookshot'))
set_rule(world.get_location('Ganons Castle Spirit Trial First Chest'), lambda state: state.has('Progressive Hookshot') and (state.has('Magic Meter') or state.has('Bomb Bag')))
set_rule(world.get_location('Ganons Castle Spirit Trial Second Chest'), lambda state: state.has('Progressive Hookshot') and state.has('Magic Meter') and state.has('Bomb Bag') and state.has('Lens of Truth'))
set_rule(world.get_location('Ganons Castle Light Trial Clear'), lambda state: state.has('Magic Meter') and state.has('Bow') and state.has('Progressive Hookshot') and state.has('Light Arrows') and state.has('Small Key (Ganons Castle)', 2))
set_rule(world.get_location('Ganons Castle Light Trail Invisible Enemies Chest'), lambda state: state.has('Magic Meter') and state.has('Lens of Truth'))
set_rule(world.get_location('Ganons Castle Light Trial Lullaby Chest'), lambda state: state.has('Zeldas Lullaby') and state.has('Small Key (Ganons Castle)', 1))
set_rule(world.get_location('Ganon'), lambda state: state.has('Boss Key (Ganons Castle)'))
set_rule(world.get_entrance('Kokiri Forest Storms Grotto'), lambda state: state.has('Song of Storms'))
set_rule(world.get_entrance('Lost Woods Generic Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Lost Woods Sales Grotto'), lambda state: state.has('Bomb Bag') or (state.has('Hammer') and state.is_adult() and (state.has('Minuet of Forest') or state.has('Sarias Song'))))
set_rule(world.get_entrance('Front of Meadow Grotto'), lambda state: state.has('Bomb Bag') or (state.has('Hammer') and state.is_adult() and (state.has('Minuet of Forest') or state.has('Sarias Song'))))
set_rule(world.get_entrance('Remote Southern Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Field Near Lake Inside Fence Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Field Valley Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Field West Castle Town Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Field Far West Castle Town Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Field Kakariko Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Field North Lon Lon Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Castle Storms Grotto'), lambda state: state.has('Song of Storms'))
set_rule(world.get_entrance('Kakariko Bombable Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Mountain Bombable Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Mountain Storms Grotto'), lambda state: state.has('Song of Storms'))
set_rule(world.get_entrance('Top of Crater Grotto'), lambda state: state.can_blast())
set_rule(world.get_entrance('Zora River Plateau Open Grotto'), lambda state: state.has('Bomb Bag') or state.has('Progressive Scale') or state.is_adult())
set_rule(world.get_entrance('Zora River Plateau Bombable Grotto'), lambda state: state.can_blast())
set_rule(world.get_location('Tektite Grotto Freestanding PoH'), lambda state: state.has('Progressive Scale', 2) or (state.has('Iron Boots') and state.is_adult()))
set_rule(world.get_location('GS2'), lambda state: state.has_bottle())
set_rule(world.get_location('GS3'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS4'), lambda state: state.has_bottle())
set_rule(world.get_location('GS5'), lambda state: state.has_bottle())
set_rule(world.get_location('GS6'), lambda state: state.has('Magic Bean'))
set_rule(world.get_location('GS7'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS9'), lambda state: state.has('Slingshot') or state.has('Bomb Bag') or state.has('Boomerang') or (state.has('Dins Fire') and state.has('Magic Meter')))
set_rule(world.get_location('GS11'), lambda state: state.has('Boomerang') and state.has('Bomb Bag'))
set_rule(world.get_location('GS12'), lambda state: (state.has('Boomerang') and state.has('Bomb Bag')) or (state.has('Progressive Hookshot') and state.is_adult()))
set_rule(world.get_location('GS13'), lambda state: (state.has('Hammer') and state.has_fire_source() and state.has('Progressive Hookshot') and state.is_adult()) or (state.has('Boomerang') and state.has('Bomb Bag') and state.has('Dins Fire') and state.has('Magic Meter')))
set_rule(world.get_location('GS16'), lambda state: state.has('Boomerang') and state.has('Bomb Bag'))
set_rule(world.get_location('GS20'), lambda state: state.has('Boomerang'))
set_rule(world.get_location('GS21'), lambda state: state.has('Boomerang'))
set_rule(world.get_location('GS26'), lambda state: state.has('Slingshot') or state.has('Bomb Bag'))
set_rule(world.get_location('GS27'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS28'), lambda state: state.has('Boomerang'))
set_rule(world.get_location('GS29'), lambda state: state.has_bottle())
set_rule(world.get_location('GS30'), lambda state: state.has_bottle() and (state.has('Bomb Bag') or state.has('Progressive Strength Upgrade')))
set_rule(world.get_location('GS31'), lambda state: state.can_blast())
set_rule(world.get_location('GS32'), lambda state: state.has('Hammer') and state.is_adult())
set_rule(world.get_location('GS33'), lambda state: state.has('Hammer') and state.is_adult())
set_rule(world.get_location('GS34'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('GS35'), lambda state: state.is_adult())
set_rule(world.get_location('GS37'), lambda state: state.has('Bolero of Fire') and state.has_bottle())
set_rule(world.get_location('GS39'), lambda state: state.has('Bomb Bag') or (state.has('Boomerang') or (state.has('Slingshot') and state.has('Progressive Strength Upgrade'))) or (state.has('Dins Fire') and state.has('Magic Meter')) or (state.is_adult() and (state.has('Progressive Hookshot') or state.has('Bow') or state.has('Biggoron Sword'))))
set_rule(world.get_location('GS41'), lambda state: (state.has('Progressive Hookshot') and state.is_adult()) or (state.has('Boomerang') and (state.has('Bomb Bag') or state.has('Progressive Strength Upgrade'))))
set_rule(world.get_location('GS42'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS45'), lambda state: state.has('Progressive Hookshot'))
set_rule(world.get_location('GS46'), lambda state: state.has('Progressive Hookshot'))
set_rule(world.get_location('GS47'), lambda state: state.has('Progressive Hookshot') or state.has('Bow') or state.has('Magic Meter'))
set_rule(world.get_location('GS49'), lambda state: state.has('Boomerang'))
set_rule(world.get_location('GS50'), lambda state: state.has('Progressive Strength Upgrade', 2) and state.can_blast() and state.has('Progressive Hookshot'))
# Jabu Jabu GS need no reqs because the access reqs for their zones cover them.
set_rule(world.get_location('GS55'), lambda state: state.has_bottle())
set_rule(world.get_location('GS56'), lambda state: state.has('Boomerang'))
set_rule(world.get_location('GS58'), lambda state: state.is_adult() and state.has('Progressive Hookshot', 2))
set_rule(world.get_location('GS59'), lambda state: state.is_adult() and state.has('Iron Boots') and state.has('Progressive Hookshot'))
set_rule(world.get_location('GS60'), lambda state: (state.has('Progressive Hookshot') or state.has('Bow') or (state.has('Dins Fire') and state.has('Magic Meter'))) and state.is_adult())
set_rule(world.get_location('GS61'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS62'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS63'), lambda state: (state.has('Progressive Hookshot', 2) or (state.has('Progressive Hookshot') and state.can_reach('Forest Temple Outside Upper Ledge'))) and state.is_adult())
set_rule(world.get_location('GS64'), lambda state: state.has('Progressive Hookshot'))
set_rule(world.get_location('GS65'), lambda state: state.has('Small Key (Fire Temple)', 1) and state.has('Song of Time'))
set_rule(world.get_location('GS66'), lambda state: state.has('Bomb Bag'))
set_rule(world.get_location('GS67'), lambda state: state.has('Small Key (Fire Temple)', 5) and state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS68'), lambda state: state.has('Small Key (Fire Temple)', 5) and state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS69'), lambda state: state.has('Hammer') and state.is_adult())
set_rule(world.get_location('GS70'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS71'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS72'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS73'), lambda state: state.has('Bomb Bag') and state.has('Magic Meter'))
set_rule(world.get_location('GS74'), lambda state: state.has('Song of Time') and state.has('Small Key (Water Temple)', 6))
set_rule(world.get_location('GS75'), lambda state: state.has('Progressive Hookshot', 2))
set_rule(world.get_location('GS76'), lambda state: state.has('Progressive Hookshot', 2))
set_rule(world.get_location('GS77'), lambda state: state.has('Progressive Hookshot', 2) and ((state.has('Bomb Bag') and state.has('Progressive Strength Upgrade')) or state.has('Hover Boots')) and state.has('Small Key (Water Temple)', 6)) #5 keys would be better but it wouldn't be compatible with the key for key scenarios, 6 will be identical pre-keysanity.
set_rule(world.get_location('GS78'), lambda state: state.has('Small Key (Bottom of the Well)', 3) and state.has('Boomerang') and (state.has('Progressive Strength Upgrade') or state.has('Bomb Bag') or (state.has('Lens of Truth') and state.has('Magic Meter'))))
set_rule(world.get_location('GS79'), lambda state: state.has('Small Key (Bottom of the Well)', 3) and state.has('Boomerang'))
set_rule(world.get_location('GS80'), lambda state: state.has('Small Key (Bottom of the Well)', 3) and state.has('Boomerang'))
set_rule(world.get_location('GS81'), lambda state: state.has('Progressive Hookshot'))
set_rule(world.get_location('GS82'), lambda state: state.has('Progressive Hookshot'))
set_rule(world.get_location('GS84'), lambda state: state.has('Progressive Hookshot', 2) and state.has('Progressive Strength Upgrade') and state.has('Small Key (Shadow Temple)', 4))
set_rule(world.get_location('GS86'), lambda state: state.has('Boomerang'))
set_rule(world.get_location('GS87'), lambda state: state.has_bottle())
set_rule(world.get_location('GS88'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS89'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS90'), lambda state: state.has('Progressive Hookshot') and state.has('Gerudo Membership Card') and state.is_adult())
set_rule(world.get_location('GS92'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS93'), lambda state: state.has_bottle() and state.has('Requiem of Spirit'))
set_rule(world.get_location('GS94'), lambda state: state.has('Progressive Hookshot') and state.is_adult())
set_rule(world.get_location('GS95'), lambda state: ((state.has('Magic Bean') and state.has('Requiem of Spirit')) or state.has('Progressive Hookshot', 2)) and state.is_adult())
set_rule(world.get_location('GS96'), lambda state: state.has('Boomerang'))
set_rule(world.get_location('GS98'), lambda state: (state.has('Boomerang') and state.has('Progressive Hookshot')) or (state.has('Boomerang') and state.has('Small Key (Spirit Temple)', 5) and state.has('Bomb Bag') and state.has('Requiem of Spirit')) or (state.has('Progressive Hookshot') and state.has('Progressive Strength Upgrade', 2) and state.is_adult() and state.has('Small Key (Spirit Temple)', 3)))
set_rule(world.get_location('GS99'), lambda state: state.has('Song of Time') and (state.has('Bow') or state.has('Progressive Hookshot') or state.has('Bomb Bag')))
set_rule(world.get_location('GS100'), lambda state: state.has('Progressive Strength Upgrade', 2) and state.has('Small Key (Spirit Temple)', 3) and state.is_adult() and (state.has('Progressive Hookshot') or state.has('Hover Boots')))
for location in world.get_locations():
if location.type != 'Chest':
forbid_item(location, 'Ice Trap')
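# For reference, a minimal sketch of the helpers used throughout this file
# (assumed shapes; the real definitions live in the rule/world modules):
#   def set_rule(spot, rule):
#       spot.access_rule = rule          # rule: callable(state) -> bool
#   def set_always_allow(spot, rule):
#       spot.always_allow = rule         # rule: callable(item, state) -> bool
#   def forbid_item(location, item_name):
#       old_rule = location.item_rule
#       location.item_rule = lambda item: item.name != item_name and old_rule(item)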
| [row metrics: 419 lines, 55,307 bytes, avg line length 131.68, max 540; quality-signal columns omitted] |

hexsha 76fbe9a992b337f707ea9b87cbc7a483ace7f569 | size 22,095 | ext py | lang Python
path: dev/Gems/CloudGemFramework/v1/ResourceManager/resource_manager/test/test_update_hooks.py
max_stars repo: kostenickj/lumberyard @ e881f3023cc1840650eb7b133e605881d1d4330d | licenses ["AML"] | 1 star (2019-02-12)
max_issues/max_forks repo: santosh90n/lumberyard-1 @ 9608bcf905bb60e9f326bd3fe8297381c22d83a6 | licenses ["AML"]
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the 'License'). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision$
import unittest
import os
import uuid
import resource_manager.uploader
import lmbr_aws_test_support
uploader_call_file_paths = []
update_call_file_paths = []
class IntegrationTest_CloudGemFramework_ResourceManager_UploaderHooks(lmbr_aws_test_support.lmbr_aws_TestCase):
PROJECT_HOOK_NAME = 'Project'
TEST_RESOURCE_GROUP_NAME_1 = 'TestResourceGroup1'
TEST_RESOURCE_GROUP_NAME_2 = 'TestResourceGroup2'
def __init__(self, *args, **kwargs):
super(IntegrationTest_CloudGemFramework_ResourceManager_UploaderHooks, self).__init__(*args, **kwargs)
def setUp(self):
self.prepare_test_envionment("uploader_hooks_test")
# Uploader hooks were deprecated in 1.9. TODO: Remove tests when support is removed.
# Begin deprecated support
UPLOADER_CODE = '''
import os
def upload_resource_group_content_pre(hook_module, resource_group_uploader):
file_path = os.path.join(os.path.dirname(__file__), 'upload_resource_group_content_pre_calls.txt')
print '>>>>>>>>>>> upload_resource_group_pre_content', file_path
with open(file_path, 'a') as f:
f.write(resource_group_uploader.key + ',' + resource_group_uploader.resource_group_name + ',' + resource_group_uploader.deployment_uploader.deployment_name + '\\n')
def upload_resource_group_content_post(hook_module, resource_group_uploader):
file_path = os.path.join(os.path.dirname(__file__), 'upload_resource_group_content_post_calls.txt')
print '>>>>>>>>>>> upload_resource_group_post_content', file_path
with open(file_path, 'a') as f:
f.write(resource_group_uploader.key + ',' + resource_group_uploader.resource_group_name + ',' + resource_group_uploader.deployment_uploader.deployment_name + '\\n')
def upload_deployment_content_pre(hook_module, deployment_uploader):
file_path = os.path.join(os.path.dirname(__file__), 'upload_deployment_content_pre_calls.txt')
print '>>>>>>>>>>> upload_deployment_pre_content', file_path
with open(file_path, 'a') as f:
f.write(deployment_uploader.key + ',' + deployment_uploader.deployment_name + '\\n')
def upload_deployment_content_post(hook_module, deployment_uploader):
file_path = os.path.join(os.path.dirname(__file__), 'upload_deployment_content_post_calls.txt')
print '>>>>>>>>>>> upload_deployment_post_content', file_path
with open(file_path, 'a') as f:
f.write(deployment_uploader.key + ',' + deployment_uploader.deployment_name + '\\n')
def upload_project_content_pre(hook_module, project_uploader):
file_path = os.path.join(os.path.dirname(__file__), 'upload_project_content_pre_calls.txt')
print '>>>>>>>>>>> upload_project_pre_content', file_path
with open(file_path, 'a') as f:
f.write(project_uploader.key + ',' + '\\n')
def upload_project_content_post(hook_module, project_uploader):
file_path = os.path.join(os.path.dirname(__file__), 'upload_project_content_post_calls.txt')
print '>>>>>>>>>>> upload_project_post_content', file_path
with open(file_path, 'a') as f:
f.write(project_uploader.key + ',' + '\\n')
'''
def __create_uploader_hook(self, directory_path):
global uploader_call_file_paths
plugin_path = os.path.join(directory_path, 'cli-plugin-code')
if not os.path.exists(plugin_path):
os.makedirs(plugin_path)
file_path = os.path.join(plugin_path, 'upload.py')
with open(file_path, 'w') as f:
f.write(self.UPLOADER_CODE)
print '>>>>>>>>>>> created uploader', file_path
uploader_call_file_paths.append(os.path.join(plugin_path, 'upload_resource_group_content_pre_calls.txt'))
uploader_call_file_paths.append(os.path.join(plugin_path, 'upload_resource_group_content_post_calls.txt'))
uploader_call_file_paths.append(os.path.join(plugin_path, 'upload_deployment_content_post_calls.txt'))
uploader_call_file_paths.append(os.path.join(plugin_path, 'upload_deployment_content_pre_calls.txt'))
uploader_call_file_paths.append(os.path.join(plugin_path, 'upload_project_content_pre_calls.txt'))
uploader_call_file_paths.append(os.path.join(plugin_path, 'upload_project_content_post_calls.txt'))
def __delete_uploader_call_files(self):
global uploader_call_file_paths
for file_path in uploader_call_file_paths:
if os.path.isfile(file_path):
os.remove(file_path)
def __assert_uploader_call_files(self, expected_list):
global uploader_call_file_paths
for file_path in uploader_call_file_paths:
was_expected = False
for expected in expected_list:
if expected in file_path:
was_expected = True
if was_expected:
self.assertTrue(os.path.isfile(file_path), msg='Expected ' + file_path)
else:
self.assertFalse(os.path.isfile(file_path), msg='Did not expect ' + file_path)
# End deprecated support
# The hooks below name each expected argument explicitly, so the test fails fast
# if an expected arg is ever missing; they also take **kwargs, so any additional
# args passed by the framework are tolerated.
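# Illustration (hypothetical call): because each hook names its parameters, a
# framework that stopped passing one of them would fail immediately, e.g.
#   before_project_updated(hook)   # TypeError: missing argument 'project_uploader'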
UPDATE_CODE = '''
import os
def log_resource_group_hook_call(base_name, hook, deployment_name, resource_group_name, resource_group_uploader, **kwargs):
file_path = os.path.join(os.path.dirname(__file__), base_name + '_{HOOK_NAME}_' + resource_group_name + '.txt')
print '>>>>>>>>>>> ' + base_name + '_{HOOK_NAME}_' + resource_group_name, file_path
with open(file_path, 'a') as f:
f.write(hook.context.config.root_directory_path + ',' + resource_group_uploader.key + ',' + resource_group_name + ',' + deployment_name + '\\n')
def before_this_resource_group_updated(hook, deployment_name, resource_group_name, resource_group_uploader, **kwargs):
log_resource_group_hook_call('before_this_resource_group_updated', hook, deployment_name, resource_group_name, resource_group_uploader)
def after_this_resource_group_updated(hook, deployment_name, resource_group_name, resource_group_uploader, **kwargs):
log_resource_group_hook_call('after_this_resource_group_updated', hook, deployment_name, resource_group_name, resource_group_uploader)
def before_resource_group_updated(hook, deployment_name, resource_group_name, resource_group_uploader, **kwargs):
log_resource_group_hook_call('before_resource_group_updated', hook, deployment_name, resource_group_name, resource_group_uploader)
def after_resource_group_updated(hook, deployment_name, resource_group_name, resource_group_uploader, **kwargs):
log_resource_group_hook_call('after_resource_group_updated', hook, deployment_name, resource_group_name, resource_group_uploader)
def log_project_hook_call(base_name, hook, project_uploader, **kwargs):
file_path = os.path.join(os.path.dirname(__file__), base_name + '_{HOOK_NAME}.txt')
print '>>>>>>>>>>> ' + base_name + '_{HOOK_NAME}', file_path
with open(file_path, 'a') as f:
f.write(hook.context.config.root_directory_path + ',' + project_uploader.key + '\\n')
def before_project_updated(hook, project_uploader, **kwargs):
log_project_hook_call('before_project_updated', hook, project_uploader)
def after_project_updated(hook, project_uploader, **kwargs):
log_project_hook_call('after_project_updated', hook, project_uploader)
'''
def __create_update_hooks(self, directory_path, hook_name):
global update_call_file_paths
plugin_path = os.path.join(directory_path, 'resource-manager-code')
if not os.path.exists(plugin_path):
os.makedirs(plugin_path)
file_path = os.path.join(plugin_path, 'update.py')
with open(file_path, 'w') as f:
f.write(self.UPDATE_CODE.format(HOOK_NAME=hook_name))
print '>>>>>>>>>>> created update hook', hook_name, file_path
update_call_file_paths.append(os.path.join(plugin_path, 'before_this_resource_group_updated_{HOOK_NAME}_{RESOURCE_GROUP_NAME}.txt'.format(HOOK_NAME=hook_name, RESOURCE_GROUP_NAME=self.TEST_RESOURCE_GROUP_NAME_1)))
update_call_file_paths.append(os.path.join(plugin_path, 'after_this_resource_group_updated_{HOOK_NAME}_{RESOURCE_GROUP_NAME}.txt'.format(HOOK_NAME=hook_name, RESOURCE_GROUP_NAME=self.TEST_RESOURCE_GROUP_NAME_1)))
update_call_file_paths.append(os.path.join(plugin_path, 'before_resource_group_updated_{HOOK_NAME}_{RESOURCE_GROUP_NAME}.txt'.format(HOOK_NAME=hook_name, RESOURCE_GROUP_NAME=self.TEST_RESOURCE_GROUP_NAME_1)))
update_call_file_paths.append(os.path.join(plugin_path, 'after_resource_group_updated_{HOOK_NAME}_{RESOURCE_GROUP_NAME}.txt'.format(HOOK_NAME=hook_name, RESOURCE_GROUP_NAME=self.TEST_RESOURCE_GROUP_NAME_1)))
update_call_file_paths.append(os.path.join(plugin_path, 'before_project_updated_{HOOK_NAME}.txt'.format(HOOK_NAME=hook_name)))
update_call_file_paths.append(os.path.join(plugin_path, 'after_project_updated_{HOOK_NAME}.txt'.format(HOOK_NAME=hook_name)))
def __delete_update_call_files(self):
global update_call_file_paths
for file_path in update_call_file_paths:
if os.path.isfile(file_path):
os.remove(file_path)
def __assert_update_call_files(self, *expected_lists):
expected_list = []
for expected_sublist in expected_lists:
expected_list.extend(expected_sublist)
print '>>> checking for', expected_list
global update_call_file_paths
for file_path in update_call_file_paths:
was_expected = False
for expected in expected_list:
if expected in file_path:
was_expected = True
if was_expected:
self.assertTrue(os.path.isfile(file_path), msg='Expected ' + file_path)
else:
self.assertFalse(os.path.isfile(file_path), msg='Did not expect ' + file_path)
def __get_update_call_file_list(self, base_name, hook_names, resource_group_name = None):
if resource_group_name is None:
return [ base_name + '_' + hook_name for hook_name in hook_names ]
else:
return [ base_name + '_' + hook_name + '_' + resource_group_name for hook_name in hook_names ]
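# For example, __get_update_call_file_list('before_project_updated', ['Project', 'TestResourceGroup1'])
# returns ['before_project_updated_Project', 'before_project_updated_TestResourceGroup1'],
# which __assert_update_call_files then matches as substrings of the recorded file paths.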
def test_update_hooks_end_to_end(self):
self.run_all_tests()
def __010_initialize_project_files(self):
self.lmbr_aws('project', 'create', '--files-only', '--region', lmbr_aws_test_support.REGION)
def __020_create_resource_groups(self):
self.lmbr_aws(
'cloud-gem', 'create',
'--gem', self.TEST_RESOURCE_GROUP_NAME_1,
'--initial-content', 'no-resources',
'--enable')
self.lmbr_aws(
'cloud-gem', 'create',
'--gem', self.TEST_RESOURCE_GROUP_NAME_2,
'--initial-content', 'no-resources',
'--enable')
def __030_create_update_hooks(self):
# Uploader hooks were deprecated in 1.9. TODO: Remove tests when support is removed.
self.__create_uploader_hook(self.AWS_DIR)
self.__create_uploader_hook(self.get_gem_aws_path(self.TEST_RESOURCE_GROUP_NAME_1))
resource_manager.uploader._uploader_hook_modules = None
self.__create_update_hooks(self.AWS_DIR, self.PROJECT_HOOK_NAME)
self.__create_update_hooks(self.get_gem_aws_path(self.TEST_RESOURCE_GROUP_NAME_1), self.TEST_RESOURCE_GROUP_NAME_1)
self.__create_update_hooks(self.get_gem_aws_path(self.TEST_RESOURCE_GROUP_NAME_2), self.TEST_RESOURCE_GROUP_NAME_2)
def __040_create_project_stack(self):
self.__delete_uploader_call_files() # deprecated
self.__delete_update_call_files()
self.lmbr_aws('project', 'create', '--stack-name', self.TEST_PROJECT_STACK_NAME, '--confirm-aws-usage', '--confirm-security-change', '--region', lmbr_aws_test_support.REGION)
self.__assert_uploader_call_files(['project_content_pre','project_content_post'])
self.__assert_update_call_files(
self.__get_update_call_file_list('before_project_updated', [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ]),
self.__get_update_call_file_list('after_project_updated', [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ])
)
def __050_create_deployment_stack(self):
self.__delete_uploader_call_files()
self.__delete_update_call_files()
self.lmbr_aws('deployment', 'create', '--deployment', self.TEST_DEPLOYMENT_NAME, '--confirm-aws-usage', '--confirm-security-change')
self.__assert_uploader_call_files(['deployment_content_pre', 'deployment_content_post', 'resource_group_content_pre','resource_group_content_post'])
self.__assert_update_call_files(
self.__get_update_call_file_list('before_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_1 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('after_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_1 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('before_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_2),
self.__get_update_call_file_list('after_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_2),
self.__get_update_call_file_list('before_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('after_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('before_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_2),
self.__get_update_call_file_list('after_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_2)
)
def __060_disable_resource_group(self):
self.lmbr_aws('resource-group', 'disable', '--resource-group', self.TEST_RESOURCE_GROUP_NAME_1)
def __070_delete_resource_group_stack(self):
self.__delete_update_call_files()
self.lmbr_aws('resource-group', 'update', '--deployment', self.TEST_DEPLOYMENT_NAME, '--resource-group', self.TEST_RESOURCE_GROUP_NAME_1, '--confirm-resource-deletion')
self.__assert_update_call_files(
# no hooks should have been called when deleting
)
def __080_enable_resource_group(self):
self.lmbr_aws('resource-group', 'enable', '--resource-group', self.TEST_RESOURCE_GROUP_NAME_1)
def __090_recreate_resource_group_stack(self):
self.__delete_uploader_call_files()
self.__delete_update_call_files()
self.lmbr_aws('resource-group', 'update', '--deployment', self.TEST_DEPLOYMENT_NAME, '--resource-group', self.TEST_RESOURCE_GROUP_NAME_1, '--confirm-aws-usage', '--confirm-security-change', '--verbose')
self.__assert_uploader_call_files(['resource_group_content_pre','resource_group_content_post'])
self.__assert_update_call_files(
# only hooks for the resource group should have been called
self.__get_update_call_file_list('before_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_1 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('after_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_1 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('before_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('after_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
)
def __100_project_updates(self):
self.__delete_uploader_call_files()
self.__delete_update_call_files()
self.lmbr_aws('project', 'update', '--confirm-aws-usage', '--confirm-security-change')
self.__assert_uploader_call_files(['project_content_pre','project_content_post'])
self.__assert_update_call_files(
self.__get_update_call_file_list('before_project_updated', [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ]),
self.__get_update_call_file_list('after_project_updated', [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ])
)
def __110_deployment_updates(self):
self.__delete_uploader_call_files()
self.__delete_update_call_files()
self.lmbr_aws('deployment', 'update', '--deployment', self.TEST_DEPLOYMENT_NAME, '--confirm-aws-usage')
self.__assert_uploader_call_files(['deployment_content_pre', 'deployment_content_post', 'resource_group_content_pre','resource_group_content_post'])
self.__assert_update_call_files(
self.__get_update_call_file_list('before_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_1 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('after_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_1 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('before_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_2),
self.__get_update_call_file_list('after_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_2),
self.__get_update_call_file_list('before_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('after_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('before_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_2),
self.__get_update_call_file_list('after_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_2)
)
def __120_resource_group_updates(self):
self.__delete_uploader_call_files()
self.__delete_update_call_files()
self.lmbr_aws('resource-group', 'update', '--deployment', self.TEST_DEPLOYMENT_NAME, '--resource-group', self.TEST_RESOURCE_GROUP_NAME_1, '--confirm-aws-usage')
self.__assert_uploader_call_files(['resource_group_content_pre','resource_group_content_post'])
self.__assert_update_call_files(
# only hooks for the resource group should have been called
self.__get_update_call_file_list('before_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_1 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('after_this_resource_group_updated', hook_names = [ self.TEST_RESOURCE_GROUP_NAME_1 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('before_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
self.__get_update_call_file_list('after_resource_group_updated', hook_names = [ self.PROJECT_HOOK_NAME, self.TEST_RESOURCE_GROUP_NAME_1, self.TEST_RESOURCE_GROUP_NAME_2 ], resource_group_name = self.TEST_RESOURCE_GROUP_NAME_1),
)
def __900_delete_deployment(self):
self.__delete_update_call_files()
self.lmbr_aws('deployment', 'delete', '--deployment', self.TEST_DEPLOYMENT_NAME, '--confirm-resource-deletion')
self.__assert_update_call_files(
# no hooks should have been called when deleting
)
def __999_delete_project(self):
self.__delete_update_call_files()
self.lmbr_aws('delete-project-stack', '--confirm-resource-deletion')
self.__assert_update_call_files(
# no hooks should have been called when deleting
)
| [row metrics: 370 lines, 22,095 bytes, avg line length 59.56, max 240; quality-signal columns omitted] |

hexsha 6a60d30a37a7de5af94bcb9ce6ea6d34494fdc9a | size 156 | ext py | lang Python
path: tests/__init__.py
repo (stars/issues/forks): 0xOmarA/RadixLib @ 85d75a47d4c4df4c1a319b74857ae2c513933623 | licenses ["MIT"] | 32 stars (2022-01-12 to 2022-03-24) | 3 issues | 1 fork
# type: ignore
from tests.api_types import *
from tests.actions import *
from tests.test_derive import TestDerive
from tests.test_signer import TestSigner
| [row metrics: 7 lines, 156 bytes, avg line length 22.29, max 40; quality-signal columns omitted] |

hexsha 6a8e338bc517cff15941cb84ef594bbb6a62e0a3 | size 303 | ext py | lang Python
path: rastervision/v2/core/runner/__init__.py
repo: carderne/raster-vision @ 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | licenses ["Apache-2.0"] | 1 star (2019-11-07)
# flake8: noqa
from rastervision.v2.core.runner.aws_batch_runner import (AWSBatchRunner, AWS_BATCH)
from rastervision.v2.core.runner.inprocess_runner import (InProcessRunner, INPROCESS)
| [row metrics: 6 lines, 303 bytes, avg line length 43.29, max 74; quality-signal columns omitted] |

hexsha 6a8f37da262a5847c49109314f8e97f655a4d252 | size 28,934 | ext py | lang Python
path: src/FTR_trainer.py
repo (stars/issues/forks): DQiaole/ZITS @ 5f7a060167790789d5e29a3d14d3c2ef8a34e765 | licenses ["Apache-2.0"] | 40 stars (2022-03-02 to 2022-03-30) | 6 issues | 5 forks
import time
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from datasets.dataset_FTR import *
from src.models.FTR_model import *
from .inpainting_metrics import get_inpainting_metrics
from .utils import Progbar, create_dir, stitch_images, SampleEdgeLineLogits
class LaMa:
def __init__(self, config, gpu, rank, test=False):
self.config = config
self.device = gpu
self.global_rank = rank
self.model_name = 'inpaint'
kwargs = dict(config.training_model)
kwargs.pop('kind')
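# config.training_model carries a 'kind' selector entry alongside the module's
# constructor kwargs; 'kind' is popped so only real constructor arguments are
# forwarded to LaMaInpaintingTrainingModule below.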
self.inpaint_model = LaMaInpaintingTrainingModule(config, gpu=gpu, rank=rank, test=test, **kwargs).to(gpu)
self.train_dataset = ImgDataset(config.TRAIN_FLIST, config.INPUT_SIZE, config.MASK_RATE, config.TRAIN_MASK_FLIST,
augment=True, training=True, test_mask_path=None)
if config.DDP:
self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=config.world_size,
rank=self.global_rank, shuffle=True)
# else:
# self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=1, rank=0, shuffle=True)
self.val_dataset = ImgDataset(config.VAL_FLIST, config.INPUT_SIZE, mask_rates=None, mask_path=None, augment=False,
training=False, test_mask_path=config.TEST_MASK_FLIST)
self.sample_iterator = self.val_dataset.create_iterator(config.SAMPLE_SIZE)
self.samples_path = os.path.join(config.PATH, 'samples')
self.results_path = os.path.join(config.PATH, 'results')
self.val_path = os.path.join(config.PATH, 'validation')
create_dir(self.val_path)
self.log_file = os.path.join(config.PATH, 'log_' + self.model_name + '.dat')
self.best = float("inf") if self.inpaint_model.best is None else self.inpaint_model.best
def save(self):
if self.global_rank == 0:
self.inpaint_model.save()
def train(self):
if self.config.DDP:
train_loader = DataLoader(self.train_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE // self.config.world_size,
num_workers=12, sampler=self.train_sampler)
else:
train_loader = DataLoader(self.train_dataset, pin_memory=True,
batch_size=self.config.BATCH_SIZE, num_workers=12, shuffle=True)
epoch = 0
keep_training = True
        max_iteration = int(float(self.config.MAX_ITERS))
total = len(self.train_dataset) // self.config.world_size
if total == 0 and self.global_rank == 0:
print('No training data was provided! Check \'TRAIN_FLIST\' value in the configuration file.')
return
while keep_training:
epoch += 1
if self.config.DDP:
self.train_sampler.set_epoch(epoch + 1) # Shuffle each epoch
epoch_start = time.time()
if self.global_rank == 0:
print('\n\nTraining epoch: %d' % epoch)
progbar = Progbar(total, width=20, stateful_metrics=['epoch', 'iter', 'loss_scale'],
verbose=1 if self.global_rank == 0 else 0)
for _, items in enumerate(train_loader):
self.inpaint_model.train()
items['image'] = items['image'].to(self.device)
items['mask'] = items['mask'].to(self.device)
# train
outputs, gen_loss, dis_loss, logs, batch = self.inpaint_model.process(items)
iteration = self.inpaint_model.iteration
if iteration >= max_iteration:
keep_training = False
break
logs = [
("epoch", epoch),
("iter", iteration),
] + [(i, logs[0][i]) for i in logs[0]] + [(i, logs[1][i]) for i in logs[1]]
if self.config.No_Bar:
pass
else:
progbar.add(len(items['image']),
values=logs if self.config.VERBOSE else [x for x in logs if not x[0].startswith('l_')])
# log model at checkpoints
if self.config.LOG_INTERVAL and iteration % self.config.LOG_INTERVAL == 1 and self.global_rank == 0:
self.log(logs)
# sample model at checkpoints
if self.config.SAMPLE_INTERVAL and iteration % self.config.SAMPLE_INTERVAL == 1 and self.global_rank == 0:
self.sample()
# evaluate model at checkpoints
if self.config.EVAL_INTERVAL and iteration % self.config.EVAL_INTERVAL == 1:
if self.global_rank == 0:
print('\nstart eval...\n')
print("Epoch: %d" % epoch)
psnr, ssim, fid = self.eval()
if self.best > fid and self.global_rank == 0:
self.best = fid
print("current best epoch is %d" % epoch)
print('\nsaving %s...\n' % self.inpaint_model.name)
raw_model = self.inpaint_model.generator.module if \
hasattr(self.inpaint_model.generator, "module") else self.inpaint_model.generator
torch.save({
'iteration': self.inpaint_model.iteration,
'generator': raw_model.state_dict(),
'best_fid': fid,
'ssim': ssim,
'psnr': psnr
}, os.path.join(self.config.PATH, self.inpaint_model.name + '_best_gen.pth'))
raw_model = self.inpaint_model.discriminator.module if \
hasattr(self.inpaint_model.discriminator, "module") else self.inpaint_model.discriminator
torch.save({
'discriminator': raw_model.state_dict(),
'best_fid': fid,
'ssim': ssim,
'psnr': psnr
}, os.path.join(self.config.PATH, self.inpaint_model.name + '_best_dis.pth'))
# save model at checkpoints
if self.config.SAVE_INTERVAL and iteration % self.config.SAVE_INTERVAL == 1 and self.global_rank == 0:
self.save()
if self.global_rank == 0:
print("Epoch: %d, time for one epoch: %d seconds" % (epoch, time.time() - epoch_start))
logs = [('Epoch', epoch), ('time', time.time() - epoch_start)]
self.log(logs)
print('\nEnd training....')
def eval(self):
if self.config.DDP:
val_loader = DataLoader(self.val_dataset, shuffle=False, pin_memory=True,
                                    batch_size=self.config.BATCH_SIZE // self.config.world_size,  # batch size per GPU
num_workers=12)
else:
val_loader = DataLoader(self.val_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE, num_workers=12)
total = len(self.val_dataset)
self.inpaint_model.eval()
if self.config.No_Bar:
pass
else:
progbar = Progbar(total, width=20, stateful_metrics=['it'])
iteration = 0
with torch.no_grad():
for items in tqdm(val_loader):
iteration += 1
items['image'] = items['image'].to(self.device)
items['mask'] = items['mask'].to(self.device)
b, _, _, _ = items['image'].size()
# inpaint model
# eval
items = self.inpaint_model(items)
outputs_merged = (items['predicted_image'] * items['mask']) + (items['image'] * (1 - items['mask']))
# save
outputs_merged *= 255.0
outputs_merged = outputs_merged.permute(0, 2, 3, 1).int().cpu().numpy()
for img_num in range(b):
cv2.imwrite(self.val_path + '/' + items['name'][img_num], outputs_merged[img_num, :, :, ::-1])
our_metric = get_inpainting_metrics(self.val_path, self.config.GT_Val_FOLDER, None, fid_test=True)
if self.global_rank == 0:
print("iter: %d, PSNR: %f, SSIM: %f, FID: %f, LPIPS: %f" %
(self.inpaint_model.iteration, float(our_metric['psnr']), float(our_metric['ssim']),
float(our_metric['fid']), float(our_metric['lpips'])))
logs = [('iter', self.inpaint_model.iteration), ('PSNR', float(our_metric['psnr'])),
('SSIM', float(our_metric['ssim'])), ('FID', float(our_metric['fid'])), ('LPIPS', float(our_metric['lpips']))]
self.log(logs)
return float(our_metric['psnr']), float(our_metric['ssim']), float(our_metric['fid'])
def sample(self, it=None):
# do not sample when validation set is empty
if len(self.val_dataset) == 0:
return
self.inpaint_model.eval()
with torch.no_grad():
items = next(self.sample_iterator)
items['image'] = items['image'].to(self.device)
items['mask'] = items['mask'].to(self.device)
# inpaint model
iteration = self.inpaint_model.iteration
inputs = (items['image'] * (1 - items['mask']))
items = self.inpaint_model(items)
outputs_merged = (items['predicted_image'] * items['mask']) + (items['image'] * (1 - items['mask']))
if it is not None:
iteration = it
image_per_row = 2
if self.config.SAMPLE_SIZE <= 6:
image_per_row = 1
images = stitch_images(
self.postprocess(items['image'].cpu()),
self.postprocess(inputs.cpu()),
self.postprocess(items['mask'].cpu()),
self.postprocess(items['predicted_image'].cpu()),
self.postprocess(outputs_merged.cpu()),
img_per_row=image_per_row
)
path = os.path.join(self.samples_path, self.model_name)
name = os.path.join(path, str(iteration).zfill(5) + ".png")
create_dir(path)
print('\nsaving sample ' + name)
images.save(name)
def log(self, logs):
with open(self.log_file, 'a') as f:
f.write('%s\n' % ' '.join([str(item[0]) + '\t' + str(item[1]) for item in logs]))
def cuda(self, *args):
return (item.to(self.config.DEVICE) for item in args)
def postprocess(self, img):
# [0, 1] => [0, 255]
img = img * 255.0
img = img.permute(0, 2, 3, 1)
return img.int()
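    # Note: log() above serializes each entry as tab-separated "key\tvalue"
    # pairs joined by spaces, one line per call, e.g.:
    #     epoch\t3 iter\t1200 PSNR\t24.1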
class ZITS:
def __init__(self, config, gpu, rank, test=False):
self.config = config
self.device = gpu
self.global_rank = rank
self.model_name = 'inpaint'
kwargs = dict(config.training_model)
kwargs.pop('kind')
self.inpaint_model = DefaultInpaintingTrainingModule(config, gpu=gpu, rank=rank, test=test, **kwargs).to(gpu)
if config.min_sigma is None:
min_sigma = 2.0
else:
min_sigma = config.min_sigma
if config.max_sigma is None:
max_sigma = 2.5
else:
max_sigma = config.max_sigma
if config.round is None:
round = 1
else:
round = config.round
if not test:
self.train_dataset = DynamicDataset(config.TRAIN_FLIST, mask_path=config.TRAIN_MASK_FLIST,
batch_size=config.BATCH_SIZE // config.world_size,
pos_num=config.rel_pos_num, augment=True, training=True,
test_mask_path=None, train_line_path=config.train_line_path,
add_pos=config.use_MPE, world_size=config.world_size,
min_sigma=min_sigma, max_sigma=max_sigma, round=round)
if config.DDP:
self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=config.world_size,
rank=self.global_rank, shuffle=True)
else:
self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=1, rank=0, shuffle=True)
self.samples_path = os.path.join(config.PATH, 'samples')
self.results_path = os.path.join(config.PATH, 'results')
self.log_file = os.path.join(config.PATH, 'log_' + self.model_name + '.dat')
self.best = float("inf") if self.inpaint_model.best is None else self.inpaint_model.best
self.val_dataset = DynamicDataset(config.VAL_FLIST, mask_path=None, pos_num=config.rel_pos_num,
batch_size=config.BATCH_SIZE, augment=False, training=False,
test_mask_path=config.TEST_MASK_FLIST,
eval_line_path=config.eval_line_path,
add_pos=config.use_MPE, input_size=config.INPUT_SIZE,
min_sigma=min_sigma, max_sigma=max_sigma)
self.sample_iterator = self.val_dataset.create_iterator(config.SAMPLE_SIZE)
self.val_path = os.path.join(config.PATH, 'validation')
create_dir(self.val_path)
def save(self):
if self.global_rank == 0:
self.inpaint_model.save()
def train(self):
if self.config.DDP:
train_loader = DataLoader(self.train_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE // self.config.world_size,
num_workers=12, sampler=self.train_sampler)
else:
train_loader = DataLoader(self.train_dataset, pin_memory=True,
batch_size=self.config.BATCH_SIZE, num_workers=12,
sampler=self.train_sampler)
epoch = self.inpaint_model.iteration // len(train_loader)
keep_training = True
        max_iteration = int(float(self.config.MAX_ITERS))
total = len(self.train_dataset) // self.config.world_size
if total == 0 and self.global_rank == 0:
print('No training data was provided! Check \'TRAIN_FLIST\' value in the configuration file.')
return
while keep_training:
epoch += 1
if self.config.DDP or self.config.DP:
self.train_sampler.set_epoch(epoch + 1)
if self.config.fix_256 is None or self.config.fix_256 is False:
self.train_dataset.reset_dataset(self.train_sampler)
epoch_start = time.time()
if self.global_rank == 0:
print('\n\nTraining epoch: %d' % epoch)
progbar = Progbar(total, width=20, stateful_metrics=['epoch', 'iter', 'loss_scale',
'g_lr', 'd_lr', 'str_lr', 'img_size'],
verbose=1 if self.global_rank == 0 else 0)
for _, items in enumerate(train_loader):
iteration = self.inpaint_model.iteration
self.inpaint_model.train()
for k in items:
if type(items[k]) is torch.Tensor:
items[k] = items[k].to(self.device)
image_size = items['image'].shape[2]
random_add_v = random.random() * 1.5 + 1.5
random_mul_v = random.random() * 1.5 + 1.5 # [1.5~3]
# random mix the edge and line
if iteration > int(self.config.MIX_ITERS):
b, _, _, _ = items['edge'].shape
if int(self.config.MIX_ITERS) < iteration < int(self.config.Turning_Point):
pred_rate = (iteration - int(self.config.MIX_ITERS)) / \
(int(self.config.Turning_Point) - int(self.config.MIX_ITERS))
b = np.clip(int(pred_rate * b), 2, b)
iteration_num_for_pred = int(random.random() * 5) + 1
edge_pred, line_pred = SampleEdgeLineLogits(self.inpaint_model.transformer,
context=[items['img_256'][:b, ...],
items['edge_256'][:b, ...],
items['line_256'][:b, ...]],
mask=items['mask_256'][:b, ...].clone(),
iterations=iteration_num_for_pred,
add_v=0.05, mul_v=4)
edge_pred = edge_pred.detach().to(torch.float32)
line_pred = line_pred.detach().to(torch.float32)
if self.config.fix_256 is None or self.config.fix_256 is False:
if image_size < 300 and random.random() < 0.5:
edge_pred = F.interpolate(edge_pred, size=(image_size, image_size), mode='nearest')
line_pred = F.interpolate(line_pred, size=(image_size, image_size), mode='nearest')
else:
edge_pred = self.inpaint_model.structure_upsample(edge_pred)[0]
edge_pred = torch.sigmoid((edge_pred + random_add_v) * random_mul_v)
edge_pred = F.interpolate(edge_pred, size=(image_size, image_size), mode='bilinear',
align_corners=False)
line_pred = self.inpaint_model.structure_upsample(line_pred)[0]
line_pred = torch.sigmoid((line_pred + random_add_v) * random_mul_v)
line_pred = F.interpolate(line_pred, size=(image_size, image_size), mode='bilinear',
align_corners=False)
items['edge'][:b, ...] = edge_pred.detach()
items['line'][:b, ...] = line_pred.detach()
# train
outputs, gen_loss, dis_loss, logs, batch = self.inpaint_model.process(items)
if iteration >= max_iteration:
keep_training = False
break
logs = [("epoch", epoch), ("iter", iteration)] + \
[(i, logs[0][i]) for i in logs[0]] + [(i, logs[1][i]) for i in logs[1]]
logs.append(("g_lr", self.inpaint_model.g_scheduler.get_lr()[0]))
logs.append(("d_lr", self.inpaint_model.d_scheduler.get_lr()[0]))
logs.append(("str_lr", self.inpaint_model.str_scheduler.get_lr()[0]))
logs.append(("img_size", batch['size_ratio'][0].item() * 256))
progbar.add(len(items['image']),
values=logs if self.config.VERBOSE else [x for x in logs if not x[0].startswith('l_')])
# log model at checkpoints
if self.config.LOG_INTERVAL and iteration % self.config.LOG_INTERVAL == 0 and self.global_rank == 0:
self.log(logs)
# sample model at checkpoints
if self.config.SAMPLE_INTERVAL and iteration > 0 and iteration % self.config.SAMPLE_INTERVAL == 0 and self.global_rank == 0:
self.sample()
# evaluate model at checkpoints
if self.config.EVAL_INTERVAL and iteration > 0 and iteration % self.config.EVAL_INTERVAL == 0 and self.global_rank == 0:
print('\nstart eval...\n')
print("Epoch: %d" % epoch)
psnr, ssim, fid = self.eval()
if self.best > fid:
self.best = fid
print("current best epoch is %d" % epoch)
print('\nsaving %s...\n' % self.inpaint_model.name)
raw_model = self.inpaint_model.generator.module if \
hasattr(self.inpaint_model.generator, "module") else self.inpaint_model.generator
raw_encoder = self.inpaint_model.str_encoder.module if \
hasattr(self.inpaint_model.str_encoder, "module") else self.inpaint_model.str_encoder
torch.save({
'iteration': self.inpaint_model.iteration,
'generator': raw_model.state_dict(),
'str_encoder': raw_encoder.state_dict(),
'best_fid': fid,
'ssim': ssim,
'psnr': psnr
}, os.path.join(self.config.PATH,
self.inpaint_model.name + '_best_gen_HR.pth'))
raw_model = self.inpaint_model.discriminator.module if \
hasattr(self.inpaint_model.discriminator, "module") else self.inpaint_model.discriminator
torch.save({
'discriminator': raw_model.state_dict()
}, os.path.join(self.config.PATH, self.inpaint_model.name + '_best_dis_HR.pth'))
# save model at checkpoints
if self.config.SAVE_INTERVAL and iteration > 0 and iteration % self.config.SAVE_INTERVAL == 0 and self.global_rank == 0:
self.save()
if self.global_rank == 0:
print("Epoch: %d, time for one epoch: %d seconds" % (epoch, time.time() - epoch_start))
logs = [('Epoch', epoch), ('time', time.time() - epoch_start)]
self.log(logs)
print('\nEnd training....')
def eval(self):
val_loader = DataLoader(self.val_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE, num_workers=12)
self.inpaint_model.eval()
with torch.no_grad():
for items in tqdm(val_loader):
for k in items:
if type(items[k]) is torch.Tensor:
items[k] = items[k].to(self.device)
b, _, _, _ = items['edge'].shape
edge_pred, line_pred = SampleEdgeLineLogits(self.inpaint_model.transformer,
context=[items['img_256'][:b, ...],
items['edge_256'][:b, ...],
items['line_256'][:b, ...]],
mask=items['mask_256'][:b, ...].clone(),
iterations=5,
add_v=0.05, mul_v=4,
device=self.device)
edge_pred, line_pred = edge_pred[:b, ...].detach().to(torch.float32), \
line_pred[:b, ...].detach().to(torch.float32)
if self.config.fix_256 is None or self.config.fix_256 is False:
edge_pred = self.inpaint_model.structure_upsample(edge_pred)[0]
edge_pred = torch.sigmoid((edge_pred + 2) * 2)
line_pred = self.inpaint_model.structure_upsample(line_pred)[0]
line_pred = torch.sigmoid((line_pred + 2) * 2)
items['edge'][:b, ...] = edge_pred.detach()
items['line'][:b, ...] = line_pred.detach()
# eval
items = self.inpaint_model(items)
outputs_merged = (items['predicted_image'] * items['mask']) + (items['image'] * (1 - items['mask']))
# save
outputs_merged *= 255.0
outputs_merged = outputs_merged.permute(0, 2, 3, 1).int().cpu().numpy()
for img_num in range(b):
cv2.imwrite(self.val_path + '/' + items['name'][img_num], outputs_merged[img_num, :, :, ::-1])
our_metric = get_inpainting_metrics(self.val_path, self.config.GT_Val_FOLDER, None, fid_test=True)
if self.global_rank == 0:
print("iter: %d, PSNR: %f, SSIM: %f, FID: %f, LPIPS: %f" %
(self.inpaint_model.iteration, float(our_metric['psnr']), float(our_metric['ssim']),
float(our_metric['fid']), float(our_metric['lpips'])))
logs = [('iter', self.inpaint_model.iteration), ('PSNR', float(our_metric['psnr'])),
('SSIM', float(our_metric['ssim'])), ('FID', float(our_metric['fid'])),
('LPIPS', float(our_metric['lpips']))]
self.log(logs)
return float(our_metric['psnr']), float(our_metric['ssim']), float(our_metric['fid'])
def sample(self, it=None):
# do not sample when validation set is empty
if len(self.val_dataset) == 0:
return
self.inpaint_model.eval()
with torch.no_grad():
items = next(self.sample_iterator)
for k in items:
if type(items[k]) is torch.Tensor:
items[k] = items[k].to(self.device)
b, _, _, _ = items['edge'].shape
edge_pred, line_pred = SampleEdgeLineLogits(self.inpaint_model.transformer,
context=[items['img_256'][:b, ...],
items['edge_256'][:b, ...],
items['line_256'][:b, ...]],
mask=items['mask_256'][:b, ...].clone(),
iterations=5,
add_v=0.05, mul_v=4,
device=self.device)
edge_pred, line_pred = edge_pred[:b, ...].detach().to(torch.float32), \
line_pred[:b, ...].detach().to(torch.float32)
if self.config.fix_256 is None or self.config.fix_256 is False:
edge_pred = self.inpaint_model.structure_upsample(edge_pred)[0]
edge_pred = torch.sigmoid((edge_pred + 2) * 2)
line_pred = self.inpaint_model.structure_upsample(line_pred)[0]
line_pred = torch.sigmoid((line_pred + 2) * 2)
items['edge'][:b, ...] = edge_pred.detach()
items['line'][:b, ...] = line_pred.detach()
# inpaint model
iteration = self.inpaint_model.iteration
inputs = (items['image'] * (1 - items['mask']))
items = self.inpaint_model(items)
outputs_merged = (items['predicted_image'] * items['mask']) + (items['image'] * (1 - items['mask']))
if it is not None:
iteration = it
image_per_row = 2
if self.config.SAMPLE_SIZE <= 6:
image_per_row = 1
images = stitch_images(
self.postprocess((items['image']).cpu()),
self.postprocess((inputs).cpu()),
self.postprocess(items['edge'].cpu()),
self.postprocess(items['line'].cpu()),
self.postprocess(items['mask'].cpu()),
self.postprocess((items['predicted_image']).cpu()),
self.postprocess((outputs_merged).cpu()),
img_per_row=image_per_row
)
path = os.path.join(self.samples_path, self.model_name)
name = os.path.join(path, str(iteration).zfill(6) + ".jpg")
create_dir(path)
print('\nsaving sample ' + name)
images.save(name)
def log(self, logs):
with open(self.log_file, 'a') as f:
f.write('%s\n' % ' '.join([str(item[0]) + '\t' + str(item[1]) for item in logs]))
def cuda(self, *args):
return (item.to(self.config.DEVICE) for item in args)
def postprocess(self, img):
# [0, 1] => [0, 255]
img = img * 255.0
img = img.permute(0, 2, 3, 1)
return img.int()
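A detail worth isolating: `eval()` and `sample()` in both trainers blend the network prediction back into the known pixels with the same masked merge. A self-contained sketch of that step, with illustrative shapes (the mask is 1 on the region to be inpainted):
```python
import torch

def merge_inpainting(predicted, image, mask):
    # Keep the prediction inside the mask, restore original pixels outside.
    return predicted * mask + image * (1 - mask)

pred = torch.rand(1, 3, 256, 256)
img = torch.rand(1, 3, 256, 256)
mask = (torch.rand(1, 1, 256, 256) > 0.5).float()  # 1 = hole to fill
merged = merge_inpainting(pred, img, mask)
assert merged.shape == img.shape
```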
| 51.301418
| 140
| 0.515414
| 3,214
| 28,934
| 4.446795
| 0.082763
| 0.047579
| 0.071648
| 0.02204
| 0.884411
| 0.868668
| 0.838931
| 0.821858
| 0.800938
| 0.7959
| 0
| 0.015251
| 0.367733
| 28,934
| 563
| 141
| 51.39254
| 0.765989
| 0.020529
| 0
| 0.731441
| 0
| 0.004367
| 0.055738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034935
| false
| 0.004367
| 0.019651
| 0.004367
| 0.080786
| 0.043668
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6ac8aca6173687d77e4664fadd51d6ffc82b2493
| 6,067
|
py
|
Python
|
trpg/parsing/functions/stats.py
|
jacobcheatley/trpg
|
59645f3362faad7aabf839999974b4d0c2e316c7
|
[
"MIT"
] | null | null | null |
trpg/parsing/functions/stats.py
|
jacobcheatley/trpg
|
59645f3362faad7aabf839999974b4d0c2e316c7
|
[
"MIT"
] | null | null | null |
trpg/parsing/functions/stats.py
|
jacobcheatley/trpg
|
59645f3362faad7aabf839999974b4d0c2e316c7
|
[
"MIT"
] | null | null | null |
from .base_function import Function
# OTHER/NORMAL STATS
# CURRENT VALUE SETTERS
class IncStatFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.other[self.name].current += self.value
class DecStatFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.other[self.name].current -= self.value
class SetStatFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.other[self.name].current = self.value
# GETTERS
class GetStatFunction(Function):
def __init__(self, args):
self.name = args[0]
def _do_function(self, campaign):
return campaign.player.stats.other[self.name].current
# RESOURCE STATS
# CURRENT VALUE SETTERS
class IncResFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].current += self.value
class DecResFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].current -= self.value
class SetResFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].current = self.value
# MIN VALUE SETTERS
class IncResMinFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].min += self.value
class DecResMinFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].min -= self.value
class SetResMinFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].min = self.value
# MAX VALUE SETTERS
class IncResMaxFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].max += self.value
class DecResMaxFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].max -= self.value
class SetResMaxFunction(Function):
def __init__(self, args):
self.name = args[0]
self.value = args[1]
def _do_function(self, campaign):
campaign.player.stats.resource[self.name].max = self.value
# GETTERS
class GetResFunction(Function):
def __init__(self, args):
self.name = args[0]
def _do_function(self, campaign):
return campaign.player.stats.resource[self.name].current
class GetResMinFunction(Function):
def __init__(self, args):
self.name = args[0]
def _do_function(self, campaign):
return campaign.player.stats.resource[self.name].min
class GetResMaxFunction(Function):
def __init__(self, args):
self.name = args[0]
def _do_function(self, campaign):
return campaign.player.stats.resource[self.name].max
# HEALTH STATS
# CURRENT VALUE SETTERS
class IncHealthFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.current += self.value
class DecHealthFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.current -= self.value
class SetHealthFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.current = self.value
# MIN VALUE SETTERS
class IncHealthMinFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.min += self.value
class DecHealthMinFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.min -= self.value
class SetHealthMinFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.min = self.value
# MAX VALUE SETTERS
class IncHealthMaxFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.max += self.value
class DecHealthMaxFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.max -= self.value
class SetHealthMaxFunction(Function):
def __init__(self, args):
self.value = args[0]
def _do_function(self, campaign):
campaign.player.stats.health.max = self.value
# GETTERS
class GetHealthFunction(Function):
def __init__(self, args):
pass
def _do_function(self, campaign):
return campaign.player.stats.health.current
class GetHealthMinFunction(Function):
def __init__(self, args):
pass
def _do_function(self, campaign):
return campaign.player.stats.health.min
class GetHealthMaxFunction(Function):
def __init__(self, args):
pass
def _do_function(self, campaign):
return campaign.player.stats.health.max
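Every class above follows the same two-step shape: positional `args` are unpacked in `__init__`, and `_do_function` reads or mutates `campaign.player.stats`. A hedged sketch of how a parser might dispatch on a function name; the `FUNCTIONS` table, its key names, and `make_function` are assumptions for illustration, and the module path is inferred from the file path:
```python
from trpg.parsing.functions.stats import (DecStatFunction, GetHealthFunction,
                                          GetStatFunction, IncStatFunction,
                                          SetStatFunction)

# Hypothetical registry: parsed function name -> Function subclass.
FUNCTIONS = {
    'inc_stat': IncStatFunction,
    'dec_stat': DecStatFunction,
    'set_stat': SetStatFunction,
    'get_stat': GetStatFunction,
    'get_health': GetHealthFunction,
}

def make_function(name, args):
    # Instantiate the registered Function subclass with its raw args list.
    return FUNCTIONS[name](args)
```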
| 24.171315
| 71
| 0.670183
| 744
| 6,067
| 5.237903
| 0.075269
| 0.096998
| 0.107775
| 0.136515
| 0.854503
| 0.822684
| 0.822684
| 0.793687
| 0.793687
| 0.793687
| 0
| 0.007785
| 0.216582
| 6,067
| 250
| 72
| 24.268
| 0.812119
| 0.034284
| 0
| 0.627451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.366013
| false
| 0.019608
| 0.006536
| 0.045752
| 0.601307
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6aee3e1ef3c2c463f859840bbf53e6cc313cc24b
| 17,159
|
py
|
Python
|
sdk/python/pulumi_datadog/azure/integration.py
|
pulumi/pulumi-datadog
|
dbc3e51b438de20aca4207bf894dbaa5a2db4bca
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2019-09-17T20:41:19.000Z
|
2022-01-07T15:42:07.000Z
|
sdk/python/pulumi_datadog/azure/integration.py
|
pulumi/pulumi-datadog
|
dbc3e51b438de20aca4207bf894dbaa5a2db4bca
|
[
"ECL-2.0",
"Apache-2.0"
] | 86
|
2019-07-08T11:47:05.000Z
|
2022-03-28T21:02:19.000Z
|
sdk/python/pulumi_datadog/azure/integration.py
|
pulumi/pulumi-datadog
|
dbc3e51b438de20aca4207bf894dbaa5a2db4bca
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2019-10-05T10:34:15.000Z
|
2021-08-08T04:14:19.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IntegrationArgs', 'Integration']
@pulumi.input_type
class IntegrationArgs:
def __init__(__self__, *,
client_id: pulumi.Input[str],
client_secret: pulumi.Input[str],
tenant_name: pulumi.Input[str],
automute: Optional[pulumi.Input[bool]] = None,
host_filters: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an Integration resource.
:param pulumi.Input[str] client_id: Your Azure web application ID.
:param pulumi.Input[str] client_secret: (Required for Initial Creation) Your Azure web application secret key.
:param pulumi.Input[str] tenant_name: Your Azure Active Directory ID.
:param pulumi.Input[bool] automute: Silence monitors for expected Azure VM shutdowns.
        :param pulumi.Input[str] host_filters: String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.g. `env:production,deploymentgroup:red`
"""
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "client_secret", client_secret)
pulumi.set(__self__, "tenant_name", tenant_name)
if automute is not None:
pulumi.set(__self__, "automute", automute)
if host_filters is not None:
pulumi.set(__self__, "host_filters", host_filters)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Input[str]:
"""
Your Azure web application ID.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: pulumi.Input[str]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Input[str]:
"""
(Required for Initial Creation) Your Azure web application secret key.
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: pulumi.Input[str]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="tenantName")
def tenant_name(self) -> pulumi.Input[str]:
"""
Your Azure Active Directory ID.
"""
return pulumi.get(self, "tenant_name")
@tenant_name.setter
def tenant_name(self, value: pulumi.Input[str]):
pulumi.set(self, "tenant_name", value)
@property
@pulumi.getter
def automute(self) -> Optional[pulumi.Input[bool]]:
"""
Silence monitors for expected Azure VM shutdowns.
"""
return pulumi.get(self, "automute")
@automute.setter
def automute(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automute", value)
@property
@pulumi.getter(name="hostFilters")
def host_filters(self) -> Optional[pulumi.Input[str]]:
"""
        String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.g. `env:production,deploymentgroup:red`
"""
return pulumi.get(self, "host_filters")
@host_filters.setter
def host_filters(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_filters", value)
@pulumi.input_type
class _IntegrationState:
def __init__(__self__, *,
automute: Optional[pulumi.Input[bool]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
host_filters: Optional[pulumi.Input[str]] = None,
tenant_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Integration resources.
:param pulumi.Input[bool] automute: Silence monitors for expected Azure VM shutdowns.
:param pulumi.Input[str] client_id: Your Azure web application ID.
:param pulumi.Input[str] client_secret: (Required for Initial Creation) Your Azure web application secret key.
        :param pulumi.Input[str] host_filters: String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.g. `env:production,deploymentgroup:red`
:param pulumi.Input[str] tenant_name: Your Azure Active Directory ID.
"""
if automute is not None:
pulumi.set(__self__, "automute", automute)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if host_filters is not None:
pulumi.set(__self__, "host_filters", host_filters)
if tenant_name is not None:
pulumi.set(__self__, "tenant_name", tenant_name)
@property
@pulumi.getter
def automute(self) -> Optional[pulumi.Input[bool]]:
"""
Silence monitors for expected Azure VM shutdowns.
"""
return pulumi.get(self, "automute")
@automute.setter
def automute(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automute", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
Your Azure web application ID.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
"""
(Required for Initial Creation) Your Azure web application secret key.
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="hostFilters")
def host_filters(self) -> Optional[pulumi.Input[str]]:
"""
        String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.g. `env:production,deploymentgroup:red`
"""
return pulumi.get(self, "host_filters")
@host_filters.setter
def host_filters(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_filters", value)
@property
@pulumi.getter(name="tenantName")
def tenant_name(self) -> Optional[pulumi.Input[str]]:
"""
Your Azure Active Directory ID.
"""
return pulumi.get(self, "tenant_name")
@tenant_name.setter
def tenant_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_name", value)
class Integration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automute: Optional[pulumi.Input[bool]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
host_filters: Optional[pulumi.Input[str]] = None,
tenant_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Provides a Datadog - Microsoft Azure integration resource. This can be used to create and manage Datadog - Microsoft Azure integrations.
## Example Usage
```python
import pulumi
import pulumi_datadog as datadog
# Create a new Datadog - Microsoft Azure integration
sandbox = datadog.azure.Integration("sandbox",
client_id="<azure_client_id>",
client_secret="<azure_client_secret_key>",
host_filters="examplefilter:true,example:true",
tenant_name="<azure_tenant_name>")
```
## Import
        # Microsoft Azure integrations can be imported using their `tenant_name` and `client_id` separated with a colon (`:`).
```sh
$ pulumi import datadog:azure/integration:Integration sandbox ${tenant_name}:${client_id}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] automute: Silence monitors for expected Azure VM shutdowns.
:param pulumi.Input[str] client_id: Your Azure web application ID.
:param pulumi.Input[str] client_secret: (Required for Initial Creation) Your Azure web application secret key.
        :param pulumi.Input[str] host_filters: String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.g. `env:production,deploymentgroup:red`
:param pulumi.Input[str] tenant_name: Your Azure Active Directory ID.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IntegrationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Provides a Datadog - Microsoft Azure integration resource. This can be used to create and manage Datadog - Microsoft Azure integrations.
## Example Usage
```python
import pulumi
import pulumi_datadog as datadog
# Create a new Datadog - Microsoft Azure integration
sandbox = datadog.azure.Integration("sandbox",
client_id="<azure_client_id>",
client_secret="<azure_client_secret_key>",
host_filters="examplefilter:true,example:true",
tenant_name="<azure_tenant_name>")
```
## Import
        # Microsoft Azure integrations can be imported using their `tenant_name` and `client_id` separated with a colon (`:`).
```sh
$ pulumi import datadog:azure/integration:Integration sandbox ${tenant_name}:${client_id}
```
:param str resource_name: The name of the resource.
:param IntegrationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IntegrationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automute: Optional[pulumi.Input[bool]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
host_filters: Optional[pulumi.Input[str]] = None,
tenant_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IntegrationArgs.__new__(IntegrationArgs)
__props__.__dict__["automute"] = automute
if client_id is None and not opts.urn:
raise TypeError("Missing required property 'client_id'")
__props__.__dict__["client_id"] = client_id
if client_secret is None and not opts.urn:
raise TypeError("Missing required property 'client_secret'")
__props__.__dict__["client_secret"] = client_secret
__props__.__dict__["host_filters"] = host_filters
if tenant_name is None and not opts.urn:
raise TypeError("Missing required property 'tenant_name'")
__props__.__dict__["tenant_name"] = tenant_name
super(Integration, __self__).__init__(
'datadog:azure/integration:Integration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
automute: Optional[pulumi.Input[bool]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
host_filters: Optional[pulumi.Input[str]] = None,
tenant_name: Optional[pulumi.Input[str]] = None) -> 'Integration':
"""
Get an existing Integration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] automute: Silence monitors for expected Azure VM shutdowns.
:param pulumi.Input[str] client_id: Your Azure web application ID.
:param pulumi.Input[str] client_secret: (Required for Initial Creation) Your Azure web application secret key.
        :param pulumi.Input[str] host_filters: String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.g. `env:production,deploymentgroup:red`
:param pulumi.Input[str] tenant_name: Your Azure Active Directory ID.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IntegrationState.__new__(_IntegrationState)
__props__.__dict__["automute"] = automute
__props__.__dict__["client_id"] = client_id
__props__.__dict__["client_secret"] = client_secret
__props__.__dict__["host_filters"] = host_filters
__props__.__dict__["tenant_name"] = tenant_name
return Integration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def automute(self) -> pulumi.Output[Optional[bool]]:
"""
Silence monitors for expected Azure VM shutdowns.
"""
return pulumi.get(self, "automute")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[str]:
"""
Your Azure web application ID.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Output[str]:
"""
(Required for Initial Creation) Your Azure web application secret key.
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="hostFilters")
def host_filters(self) -> pulumi.Output[Optional[str]]:
"""
        String of host tag(s) (in the form `key:value,key:value`) defines a filter that Datadog will use when collecting metrics from Azure. Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog. e.g. `env:production,deploymentgroup:red`
"""
return pulumi.get(self, "host_filters")
@property
@pulumi.getter(name="tenantName")
def tenant_name(self) -> pulumi.Output[str]:
"""
Your Azure Active Directory ID.
"""
return pulumi.get(self, "tenant_name")
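Beyond the creation example embedded in the docstrings, the static `get()` defined above can adopt an already-configured integration. A minimal sketch; the resource name is arbitrary and the id placeholder keeps the documented `tenant_name:client_id` format:
```python
import pulumi
import pulumi_datadog as datadog

# Adopt an existing Azure integration instead of creating a new one.
existing = datadog.azure.Integration.get(
    'imported-sandbox',
    id='<tenant_name>:<client_id>')
pulumi.export('azureTenant', existing.tenant_name)
```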
| 44.801567
| 364
| 0.653476
| 2,078
| 17,159
| 5.193936
| 0.091434
| 0.070323
| 0.070045
| 0.055036
| 0.844344
| 0.825164
| 0.805615
| 0.778468
| 0.76179
| 0.746224
| 0
| 0.000078
| 0.248383
| 17,159
| 382
| 365
| 44.918848
| 0.836784
| 0.387377
| 0
| 0.653659
| 1
| 0
| 0.099163
| 0.00387
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156098
| false
| 0.004878
| 0.02439
| 0
| 0.273171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ac8fb13ed3e01d6cb3a8b2e50f9d8e1175d66d9
| 786
|
py
|
Python
|
env2config/tests/test_conversions.py
|
dacjames/env2config
|
dd900754daa112362be78e54a3bb410772c4aff9
|
[
"MIT"
] | null | null | null |
env2config/tests/test_conversions.py
|
dacjames/env2config
|
dd900754daa112362be78e54a3bb410772c4aff9
|
[
"MIT"
] | null | null | null |
env2config/tests/test_conversions.py
|
dacjames/env2config
|
dd900754daa112362be78e54a3bb410772c4aff9
|
[
"MIT"
] | null | null | null |
def test_dotted_lower():
from env2config.conversions import dotted_lower
given = 'FOO_BAR'
expected = 'foo.bar'
result = dotted_lower(given)
assert result == expected
def test_dotted_lower_trailing():
from env2config.conversions import dotted_lower
given = 'FOO_BAR_'
expected = 'foo.bar.'
result = dotted_lower(given)
assert result == expected
def test_dashed_lower():
from env2config.conversions import dashed_lower
given = 'FOO_BAR'
expected = 'foo-bar'
result = dashed_lower(given)
assert result == expected
def test_dashed_lower_trailing():
from env2config.conversions import dashed_lower
given = 'FOO_BAR_'
expected = 'foo-bar-'
result = dashed_lower(given)
assert result == expected
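The conversions under test are not shown in this file. Inferred purely from the four assertions above, a plausible reconstruction (the real `env2config.conversions` source may differ):
```python
def dotted_lower(name):
    # 'FOO_BAR' -> 'foo.bar'; a trailing underscore becomes a trailing dot.
    return name.lower().replace('_', '.')

def dashed_lower(name):
    # 'FOO_BAR' -> 'foo-bar'
    return name.lower().replace('_', '-')

assert dotted_lower('FOO_BAR_') == 'foo.bar.'
assert dashed_lower('FOO_BAR') == 'foo-bar'
```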
| 19.170732
| 51
| 0.697201
| 94
| 786
| 5.574468
| 0.170213
| 0.152672
| 0.19084
| 0.236641
| 0.96374
| 0.954198
| 0.914122
| 0.914122
| 0.914122
| 0.858779
| 0
| 0.006515
| 0.21883
| 786
| 40
| 52
| 19.65
| 0.846906
| 0
| 0
| 0.666667
| 0
| 0
| 0.076531
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0af5e6fda8dd9371cb529dfe249093e587c5c376
| 3,660
|
py
|
Python
|
tests/proc/test_command_basic.py
|
spookey/git-sh-sync
|
372cbc8168557e0e99d7963da63717242a9491e9
|
[
"MIT"
] | 1
|
2020-11-15T20:37:08.000Z
|
2020-11-15T20:37:08.000Z
|
tests/proc/test_command_basic.py
|
spookey/git-sh-sync
|
372cbc8168557e0e99d7963da63717242a9491e9
|
[
"MIT"
] | null | null | null |
tests/proc/test_command_basic.py
|
spookey/git-sh-sync
|
372cbc8168557e0e99d7963da63717242a9491e9
|
[
"MIT"
] | null | null | null |
from os import path
from pprint import pformat
from git_sh_sync.proc import CHAR_NEWLINE
def test_cmd_init_empty(helpcmd):
res = helpcmd.init('test-command')
assert res.cmd == ['test-command']
assert res.cwd is None
assert res.cin is None
assert res.exc is None
assert res.code is None
assert res.stdout == ''
assert res.stderr == ''
assert res.command == 'test-command'
assert res.launched is False
assert res.success is False
assert res.out == []
assert res.err == []
def test_cmd_init_more(helpcmd):
res = helpcmd.init('test-command', cwd='test-dir', cin='test-input')
assert res.cmd == ['test-command']
assert res.cwd == path.realpath('test-dir')
assert res.cin == 'test-input'
assert res.exc is None
assert res.code is None
assert res.stdout == ''
assert res.stderr == ''
assert res.command == 'test-command'
assert res.launched is False
assert res.success is False
assert res.out == []
assert res.err == []
def test_cmd_out_err(helpcmd):
res = helpcmd.init('test-command')
assert res.cmd == ['test-command']
assert res.stdout == ''
assert res.out == []
assert res.stderr == ''
assert res.err == []
helpcmd.edit(res, stdout='test\nout', stderr='test\nerr')
assert res.stdout == 'test\nout'
assert res.out == ['test', 'out']
assert res.stderr == 'test\nerr'
assert res.err == ['test', 'err']
def test_cmd_launched_c(helpcmd):
res = helpcmd.init('test-command')
assert res.cmd == ['test-command']
assert res.code is None
assert res.launched is False
helpcmd.edit(res, code=0)
assert res.code == 0
assert res.launched is True
def test_cmd_launched_e(helpcmd):
res = helpcmd.init('test-command')
assert res.cmd == ['test-command']
assert res.exc is None
assert res.launched is False
helpcmd.edit(res, exc='exception')
assert res.exc == 'exception'
assert res.launched is True
def test_cmd_fields_pre(helpcmd):
res = helpcmd.init('test-command', cwd='test-dir', cin='test-input')
assert res.fields == dict(
command='test-command',
cwd=path.realpath('test-dir'),
cin='test-input',
)
def test_cmd_repr_pre(helpcmd):
res = helpcmd.init('test-command', cwd='test-dir', cin='test-input')
assert str(res) == pformat(dict(
command='test-command',
cwd=path.realpath('test-dir'),
cin='test-input',
))
def test_cmd_fields_post(helpcmd):
res = helpcmd.init('test-command', cwd='test-dir', cin='test-input')
assert res.fields == dict(
command='test-command',
cwd=path.realpath('test-dir'),
cin='test-input',
)
helpcmd.edit(res, code=0)
assert res.fields == dict(
command='test-command',
cwd=path.realpath('test-dir'),
cin='test-input',
stdout='',
stderr='',
code=0,
exc=None,
)
def test_cmd_repr_post(helpcmd):
res = helpcmd.init('test-command', cwd='test-dir', cin='test-input')
assert str(res) == pformat(dict(
command='test-command',
cwd=path.realpath('test-dir'),
cin='test-input',
))
helpcmd.edit(res, code=0)
assert str(res) == pformat(dict(
command='test-command',
cwd=path.realpath('test-dir'),
cin='test-input',
stdout='',
stderr='',
code=0,
exc=None,
))
def test_cmd_repr_repr(helpcmd):
res = helpcmd.init('test-command', cwd='test-dir', cin='test-input')
assert res.repr == '"""{}{}{}"""'.format(
CHAR_NEWLINE, str(res), CHAR_NEWLINE
)
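The `helpcmd` fixture these tests rely on lives elsewhere, presumably in a conftest. Judging only by how it is used here, a plausible stand-in might look like the sketch below; the `Command` class name is an assumption and the real fixture may differ:
```python
import pytest
from git_sh_sync.proc import Command  # assumed name of the class under test

class _HelpCmd:
    def init(self, *args, **kwargs):
        # Build the object under test without launching anything.
        return Command(*args, **kwargs)

    def edit(self, res, **kwargs):
        # Poke result fields directly to simulate a finished launch.
        for key, value in kwargs.items():
            setattr(res, key, value)
        return res

@pytest.fixture
def helpcmd():
    return _HelpCmd()
```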
| 27.313433
| 72
| 0.611475
| 497
| 3,660
| 4.432596
| 0.10664
| 0.192011
| 0.070813
| 0.07626
| 0.860191
| 0.788924
| 0.788924
| 0.779392
| 0.736269
| 0.736269
| 0
| 0.002134
| 0.231694
| 3,660
| 133
| 73
| 27.518797
| 0.781294
| 0
| 0
| 0.723214
| 0
| 0
| 0.161202
| 0
| 0
| 0
| 0
| 0
| 0.446429
| 1
| 0.089286
| false
| 0
| 0.026786
| 0
| 0.116071
| 0.008929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7c4dc5a340cb2e6586344f417025706316c7123f
| 32,474
|
py
|
Python
|
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/operations/storage_accounts_operations.py
|
fhoering/autorest
|
b36c77ebb6a5c92aca72eea0894a683506af5817
|
[
"MIT"
] | null | null | null |
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/operations/storage_accounts_operations.py
|
fhoering/autorest
|
b36c77ebb6a5c92aca72eea0894a683506af5817
|
[
"MIT"
] | null | null | null |
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/operations/storage_accounts_operations.py
|
fhoering/autorest
|
b36c77ebb6a5c92aca72eea0894a683506af5817
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class StorageAccountsOperations(object):
"""StorageAccountsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def check_name_availability(
self, account_name, custom_headers=None, raw=False, **operation_config):
"""Checks that account name is valid and is not in use.
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name:
:class:`StorageAccountCheckNameAvailabilityParameters
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountCheckNameAvailabilityParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`CheckNameAvailabilityResult
<Fixtures.AcceptanceTestsStorageManagementClient.models.CheckNameAvailabilityResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Asynchronously creates a new storage account with the specified
parameters. Existing accounts cannot be updated with this API and
should instead use the Update Storage Account API. If an account is
        already created and a subsequent PUT request is issued with the exact
        same set of properties, then HTTP 200 would be returned.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: :class:`StorageAccountCreateParameters
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountCreateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`StorageAccount
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
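    # Note: create() follows the classic Azure long-running-operation
    # pattern: the closures above split the call into send (the initial
    # PUT), poll (GET on the status link) and parse (validate and
    # deserialize the final body), and AzureOperationPoller drives them.
    # Caller-side, the returned poller is typically consumed like this
    # (client construction assumed, sketch only):
    #     poller = client.storage_accounts.create(group, name, params)
    #     account = poller.result()  # blocks until provisioning finishes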
def delete(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_properties(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Returns the properties for the specified storage account including but
not limited to name, account type, location, and account status. The
ListKeys operation should be used to retrieve storage keys.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccount
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Updates the account type or tags for a storage account. It can also be
used to add a custom domain (note that custom domains cannot be added
via the Create operation). Only one custom domain is supported per
storage account. This API can only be used to update one of tags,
accountType, or customDomain per call. To update multiple of these
properties, call the API multiple times with one change per call.
This call does not change the storage keys for the account. If you
want to change storage account keys, use the RegenerateKey operation.
The location and name of the storage account cannot be changed after
creation.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param parameters: The parameters to update on the account. Note that
only one property can be changed at a time using this API.
:type parameters: :class:`StorageAccountUpdateParameters
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccount
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
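# --- Usage sketch (illustrative; `client` and the values are assumptions).
# Per the docstring, only one of tags, accountType, or customDomain may be
# changed per call, so two changes require two requests:
#
#   client.storage_accounts.update(
#       'my-resource-group', 'mystorageacct',
#       models.StorageAccountUpdateParameters(account_type='Standard_GRS'))
#   client.storage_accounts.update(
#       'my-resource-group', 'mystorageacct',
#       models.StorageAccountUpdateParameters(tags={'env': 'dev'}))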
def list_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountKeys
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all the storage accounts available under the subscription. Note
that storage keys are not returned; use the ListKeys operation for
this.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountPaged
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
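# --- Usage sketch (illustrative; `client` is an assumption). The returned
# StorageAccountPaged is an iterator that follows next_link via
# internal_paging transparently:
#
#   for account in client.storage_accounts.list():
#       print(account.name)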
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the storage accounts available under the given resource
group. Note that storage keys are not returned; use the ListKeys
operation for this.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountPaged
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def regenerate_key(
self, resource_group_name, account_name, key_name=None, custom_headers=None, raw=False, **operation_config):
"""Regenerates the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param key_name: Possible values include: 'key1', 'key2'
:type key_name: str or :class:`KeyName
<Fixtures.AcceptanceTestsStorageManagementClient.models.KeyName>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountKeys
<Fixtures.AcceptanceTestsStorageManagementClient.models.StorageAccountKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
regenerate_key = models.StorageAccountRegenerateKeyParameters(key_name=key_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
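# --- Usage sketch (illustrative; assumes the fixture's StorageAccountKeys
# model exposes key1/key2 attributes):
#
#   keys = client.storage_accounts.regenerate_key(
#       'my-resource-group', 'mystorageacct', key_name='key1')
#   print(keys.key1, keys.key2)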
| 47.476608
| 154
| 0.673554
| 3,441
| 32,474
| 6.17553
| 0.079628
| 0.034824
| 0.028
| 0.030494
| 0.836424
| 0.830541
| 0.8256
| 0.819247
| 0.814259
| 0.811435
| 0
| 0.003761
| 0.23856
| 32,474
| 683
| 155
| 47.54612
| 0.855623
| 0.328725
| 0
| 0.804878
| 0
| 0
| 0.177493
| 0.104876
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045732
| false
| 0
| 0.015244
| 0
| 0.134146
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c637563ac4017f735dfd3553f7ff1922af3f994
| 131
|
py
|
Python
|
lectures/lecture00/code/helloWorldBroke.py
|
mateusza/Introduction-to-Python-Numerical-Analysis-for-Engineers-and-Scientist
|
a27144cc8742e67af215e8de781bd208cc1f7436
|
[
"MIT"
] | 101
|
2017-11-28T15:08:25.000Z
|
2022-03-26T13:59:49.000Z
|
lectures/lecture00/code/helloWorldBroke.py
|
mateusza/Introduction-to-Python-Numerical-Analysis-for-Engineers-and-Scientist
|
a27144cc8742e67af215e8de781bd208cc1f7436
|
[
"MIT"
] | 1
|
2017-12-16T19:41:39.000Z
|
2017-12-16T19:41:39.000Z
|
lectures/lecture00/code/helloWorldBroke.py
|
mateusza/Introduction-to-Python-Numerical-Analysis-for-Engineers-and-Scientist
|
a27144cc8742e67af215e8de781bd208cc1f7436
|
[
"MIT"
] | 54
|
2017-12-15T19:19:53.000Z
|
2022-03-01T23:36:55.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# fix: with print_function imported, print must be called as a function
print('hello world')
| 32.75
| 39
| 0.854962
| 17
| 131
| 5.764706
| 0.529412
| 0.306122
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129771
| 131
| 4
| 40
| 32.75
| 0.859649
| 0
| 0
| 0
| 0
| 0
| 0.085271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.75
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
7cb31e5f01517f10f04b6300e96bddae8d14076e
| 22,870
|
py
|
Python
|
pvrpm/core/modules/failure.py
|
FSEC-Photovoltaics/pvrpm-lcoe
|
dbe0bb30ffa1041ec004f84c57aac44f47bdf6d2
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T16:03:26.000Z
|
2022-03-29T16:03:26.000Z
|
pvrpm/core/modules/failure.py
|
FSEC-Photovoltaics/pvrpm-lcoe
|
dbe0bb30ffa1041ec004f84c57aac44f47bdf6d2
|
[
"BSD-3-Clause"
] | 29
|
2022-02-05T17:27:07.000Z
|
2022-03-06T23:53:50.000Z
|
pvrpm/core/modules/failure.py
|
FSEC-Photovoltaics/pvrpm-lcoe
|
dbe0bb30ffa1041ec004f84c57aac44f47bdf6d2
|
[
"BSD-3-Clause"
] | 2
|
2022-01-13T23:35:19.000Z
|
2022-03-21T15:46:05.000Z
|
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from pvrpm.core.enums import ConfigKeys as ck
from pvrpm.core.case import SamCase
from pvrpm.core.utils import sample, get_higher_components
from pvrpm.core.modules.monitor import IndepMonitor
class Failure(ABC):
"""
This abstract class defines how a failure should be set up
"""
def __init__(
self,
level: str,
comp_level_df: pd.DataFrame,
case: SamCase,
indep_monitoring: IndepMonitor = None,
):
"""
Initializes a failure instance
Args:
level (str): The component level this failure is a part of
comp_level_df (:obj:`pd.DataFrame`): The component level dataframe containing the simulation data
case (:obj:`SamCase`): The SAM case for this simulation
indep_monitoring (:obj:`IndepMonitor`, Optional): For updating static monitoring during simulation
"""
super().__init__()
self.level = level
self.df = comp_level_df
self.case = case
self.fails_per_day = {}
self.indep_monitoring = indep_monitoring
self.last_failure_day = 0
self.mean = None
self.initialize_components()
@abstractmethod
def initialize_components(self):
"""
Initializes failure data for all components to be tracked during simulation for this failure type
Note:
Updates the underlying dataframes in place
"""
pass
@abstractmethod
def reinitialize_components(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Reinitialize components in a dataframe, similar to the initial initialization. Used when repairs or other events may occur
Args:
df (:obj:`pd.DataFrame`): The dataframe containing the components to reinitialize
Returns:
:obj:`pd.DataFrame`: The reinitialized components
"""
pass
@abstractmethod
def update(self, day: int):
"""
Perform a failure update for one day in the simulation:
Changes state of a component to failed, incrementing failures and checking warranty only for failed components of each failure type
Args:
day (int): Current day in the simulation
Note:
Updates the underlying dataframes in place
"""
pass
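# --- Minimal subclass sketch (illustrative, not part of the module): a
# concrete failure type only needs the three hooks below; __init__ calls
# initialize_components() automatically.
#
#   class NoOpFailure(Failure):
#       def initialize_components(self):
#           pass
#       def reinitialize_components(self, df):
#           return df
#       def update(self, day):
#           pass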
class TotalFailure(Failure):
"""
Describes how a total failure of a component should operate
"""
def initialize_components(self):
component_info = self.case.config[self.level]
df = self.df
failure_modes = list(component_info.get(ck.FAILURE, {}).keys())
self.mean = {} # init mean for each failure mode
possible_failure_times = np.zeros((component_info[ck.NUM_COMPONENT], len(failure_modes)))
for i, mode in enumerate(failure_modes):
self.mean[mode] = 0
# initialize failure mode by type
df[f"failure_by_type_{mode}"] = 0
fail = component_info[ck.FAILURE][mode]
if fail.get(ck.FRAC, None) or fail.get(ck.DECAY_FRAC, None):
frac = fail[ck.FRAC] if ck.FRAC in fail else fail[ck.DECAY_FRAC]
# choose a percentage of components to be defective
sample_ = np.random.random_sample(size=component_info[ck.NUM_COMPONENT])
defective = sample_ < frac
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
possible_failure_times[:, i] = np.where(list(defective), sample_, np.finfo(np.float32).max)
else:
# setup failure times for each component
possible_failure_times[:, i] = sample(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])
# initialize failures per day for this failure mode
self.fails_per_day[mode] = np.zeros(self.case.config[ck.LIFETIME_YRS] * 365)
failure_ind = np.argmin(possible_failure_times, axis=1)
df["time_to_failure"] = np.amin(possible_failure_times, axis=1)
df["failure_type"] = [failure_modes[i] for i in failure_ind]
def reinitialize_components(self, df: pd.DataFrame) -> pd.DataFrame:
component_info = self.case.config[self.level]
failure_modes = list(component_info.get(ck.FAILURE, {}).keys())
fraction_failures = []
num_repaired = len(df)
possible_failure_times = np.zeros((num_repaired, len(failure_modes)))
for i, mode in enumerate(failure_modes):
fail = component_info[ck.FAILURE][mode]
if fail.get(ck.FRAC, None) or fail.get(ck.DECAY_FRAC, None):
frac = 0
if fail.get(ck.FRAC, None):
fraction_failures.append(mode)
frac = fail[ck.FRAC]
else:
frac = fail[ck.DECAY_FRAC]
# choose a percentage of modules to be defective
sample_ = np.random.random_sample(size=num_repaired)
defective = sample_ < frac
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], num_repaired)
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
possible_failure_times[:, i] = np.where(
list(defective),
sample_,
np.finfo(np.float32).max,
)
else:
# setup failure times for each component
possible_failure_times[:, i] = sample(fail[ck.DIST], fail[ck.PARAM], num_repaired)
failure_ind = np.argmin(possible_failure_times, axis=1)
df["time_to_failure"] = np.amin(possible_failure_times, axis=1)
df["failure_type"] = [failure_modes[i] for i in failure_ind]
# now, need to make sure that our fractional failures percentages are met for all components in this level
# TODO: need to speed this up somehow
if fraction_failures:
# removes the diminishing effect where at the beginning of the simulation frac modules are a defective failure, then frac of frac is defective, etc.
# possible failure times will also include whatever the current failure time is for the component; if it's less than a defective one it doesn't change
possible_failure_times = np.zeros((len(self.df), len(fraction_failures) + 1))
possible_failure_times.fill(np.finfo(np.float32).max)
# NOTE: instead of processing the whole df, it may be better to find the fraction, sample that fraction of the components, and update only those using the same method below
for i, mode in enumerate(fraction_failures):
counts = (self.df["failure_type"].astype(str) == mode).sum()
frac = counts / len(self.df)
fail = component_info[ck.FAILURE][mode]
if frac >= fail[ck.FRAC]:
continue
sample_ = np.random.random_sample(size=len(self.df))
# we just want the difference in fractions to bump it up to the failure fraction
defective = sample_ < (fail[ck.FRAC] - frac)
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], len(self.df))
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
possible_failure_times[:, i] = np.where(
list(defective),
sample_,
np.finfo(np.float32).max,
)
possible_failure_times[:, -1] = self.df["time_to_failure"]
failure_ind = np.argmin(possible_failure_times, axis=1)
types = []
for comp, i in enumerate(failure_ind):
if i != len(fraction_failures):
types.append(fraction_failures[i])
else:
types.append(self.df["failure_type"].iloc[comp])
self.df["time_to_failure"] = np.amin(possible_failure_times, axis=1)
self.df["failure_type"] = np.array(types).astype(str)
return df
def update(self, day: int):
df = self.df
# decrement time to failures for operational modules
# TODO: change this to state > 0 once partial failures implemented
df["time_to_failure"] -= 1
failure_modes = list(self.case.config[self.level][ck.FAILURE].keys())
# TODO: change this to state > 0 once partial failures implemented
mask = (df["state"] == 1) & (df["time_to_failure"] < 1)
failed_comps = df.loc[mask].copy()
if len(failed_comps) > 0:
self.last_failure_day = day
failed_comps["time_to_failure"] = 0
failed_comps["cumulative_failures"] += 1
for fail in failure_modes:
fail_mask = failed_comps["failure_type"].astype(str) == fail
failed_comps.loc[fail_mask, f"failure_by_type_{fail}"] += 1
self.fails_per_day[fail][day] += len(failed_comps.loc[fail_mask])
warranty_mask = failed_comps["time_left_on_warranty"] <= 0
failed_comps.loc[warranty_mask, "cumulative_oow_failures"] += 1
failed_comps["state"] = 0
# update time to detection times for component levels with only independent monitoring
# which will have None for monitor times
try:
if failed_comps["monitor_times"].isnull().any():
# monitor and time to detection will be the time to next indep monitoring
indep_monitors = list(self.case.config[self.level][ck.INDEP_MONITOR].keys())
# next indep monitoring is the min of the possible indep monitors for this component level
failed_comps["monitor_times"] = np.amin(self.indep_monitoring.indep_monitoring[indep_monitors])
# in order to calculate the time to detection for component levels only monitored by an
# independent monitoring with a threshold (no interval), we need to instead
# set the nans that will be there to the day in the simulation when these components failed
# so it can be calculated later
failed_comps["monitor_times"] = failed_comps["monitor_times"].fillna(day)
failed_comps["time_to_detection"] = None # failed_comps["monitor_times"].copy()
# fails with KeyError if no monitoring is defined; faster than checking whether the column exists first
except KeyError:
pass
df.loc[mask] = failed_comps
else:
# check when the last failure was for fractional failure modes, and update components with new failures
# if it has been longer than the mean time of the distribution
# this is so that if repairs aren't occurring due to poor monitoring, failures still occur
failure_modes = list(self.case.config[self.level].get(ck.FAILURE, {}).keys())
fraction_failures = []
for mode in failure_modes:
fail = self.case.config[self.level][ck.FAILURE][mode]
if fail.get(ck.FRAC, None):
# extract mean, since some distributions might not have mean defined in params
if self.mean[mode] == 0:
self.mean[mode] = sample(fail[ck.DIST], fail[ck.PARAM], 10000).mean()
if day > (self.mean[mode] + self.last_failure_day):
fraction_failures.append(mode)
self.last_failure_day = day
for mode in fraction_failures:
# fail new fraction of components
# possible failure times will also include whatever the current failure time is for the component; if it's less than a defective one it doesn't change
possible_failure_times = np.zeros((len(self.df), len(fraction_failures) + 1))
possible_failure_times.fill(np.finfo(np.float32).max)
# NOTE: instead of processing the whole df, it may be better to find the fraction, sample that fraction of the components, and update only those using the same method below
for i, mode in enumerate(fraction_failures):
fail = self.case.config[self.level][ck.FAILURE][mode]
sample_ = np.random.random_sample(size=len(self.df))
defective = sample_ < fail[ck.FRAC]
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], len(self.df))
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
possible_failure_times[:, i] = np.where(
list(defective),
sample_,
np.finfo(np.float32).max,
)
possible_failure_times[:, -1] = self.df["time_to_failure"]
failure_ind = np.argmin(possible_failure_times, axis=1)
types = []
for comp, i in enumerate(failure_ind):
if i != len(fraction_failures):
types.append(fraction_failures[i])
else:
types.append(self.df["failure_type"].iloc[comp])
self.df["time_to_failure"] = np.amin(possible_failure_times, axis=1)
self.df["failure_type"] = np.array(types).astype(str)
class PartialFailure(Failure):
"""
Specifies a decrease in the state of a component via a failure
Unlike total failures, every defined partial failure has its own object, instead of managing all of them at once
"""
def __init__(
self,
level: str,
comp_level_df: pd.DataFrame,
case: SamCase,
mode: str,
indep_monitoring: IndepMonitor = None,
):
"""
Initializes a partial failure instance
Args:
level (str): The component level this failure is a part of
comp_level_df (:obj:`pd.DataFrame`): The component level dataframe containing the simulation data
case (:obj:`SamCase`): The SAM case for this simulation
mode (str): The name of the partial failure mode
indep_monitoring (:obj:`IndepMonitor`, Optional): For updating static monitoring during simulation
"""
self.mode = mode
super().__init__(level, comp_level_df, case, indep_monitoring=indep_monitoring)
def initialize_components(self):
component_info = self.case.config[self.level]
df = self.df
mode = self.mode
failure_times = None
# initialize failure mode by type
df[f"failure_by_type_{mode}"] = 0
fail = component_info[ck.PARTIAL_FAIL][mode]
if fail.get(ck.FRAC, None) or fail.get(ck.DECAY_FRAC, None):
frac = fail[ck.FRAC] if ck.FRAC in fail else fail[ck.DECAY_FRAC]
# choose a percentage of components to be defective
sample_ = np.random.random_sample(size=component_info[ck.NUM_COMPONENT])
defective = sample_ < frac
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
failure_times = np.where(list(defective), sample_, np.nan)
else:
# setup failure times for each component
failure_times = sample(fail[ck.DIST], fail[ck.PARAM], component_info[ck.NUM_COMPONENT])
# initialize failures per day for this failure mode
self.fails_per_day = {self.mode: np.zeros(self.case.config[ck.LIFETIME_YRS] * 365)}
df[f"time_to_failure_{mode}"] = failure_times
def reinitialize_components(self, df: pd.DataFrame) -> pd.DataFrame:
component_info = self.case.config[self.level]
num_repaired = len(df)
fraction_failure = False
failure_times = None
mode = self.mode
fail = component_info[ck.PARTIAL_FAIL][mode]
if fail.get(ck.FRAC, None) or fail.get(ck.DECAY_FRAC, None):
if fail.get(ck.FRAC, None):
fraction_failure = True
frac = fail[ck.FRAC]
else:
frac = fail[ck.DECAY_FRAC]
# choose a percentage of modules to be defective
sample_ = np.random.random_sample(size=num_repaired)
defective = sample_ < frac
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], num_repaired)
# only give a possible failure time if the module is defective, otherwise it is set to nan, partial failure is not applied
failure_times = np.where(list(defective), sample_, np.nan)
else:
# setup failure times for each component
failure_times = sample(fail[ck.DIST], fail[ck.PARAM], num_repaired)
df[f"time_to_failure_{mode}"] = failure_times
# now, need to make sure that our fractional failure percentage is met for all components in this level
# TODO: need to speed this up somehow
if fraction_failure:
# removes the diminishing effect where at the beginning of the simulation frac modules are a defective failure, then frac of frac is defective, etc.
# NOTE: instead of processing the whole df, it may be better to find the fraction, sample that fraction of the components, and update only those using the same method below
# the number of components currently in this failure mode is the number of non-NaN time_to_failure values
counts = self.df[f"time_to_failure_{mode}"].isna()
update_df = self.df.loc[counts].copy()
frac = (~counts).sum() / len(self.df)
if frac >= fail[ck.FRAC]:
return df
sample_ = np.random.random_sample(size=len(update_df))
# we just want the difference in fractions to bump it up to the failure fraction
defective = sample_ < (fail[ck.FRAC] - frac)
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], len(update_df))
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
failure_times = np.where(
list(defective),
sample_,
np.nan,
)
update_df[f"time_to_failure_{mode}"] = failure_times
self.df.loc[counts] = update_df
return df
def update(self, day: int):
df = self.df
# decrement time to failures
df[f"time_to_failure_{self.mode}"] -= 1
mask = (df["state"] == 1) & (df[f"time_to_failure_{self.mode}"] < 1)
failed_comps = df.loc[mask].copy()
if len(failed_comps) > 0:
self.last_failure_day = day
failed_comps["cumulative_failures"] += 1
failed_comps[f"failure_by_type_{self.mode}"] += 1
self.fails_per_day[self.mode][day] += len(failed_comps)
warranty_mask = failed_comps["time_left_on_warranty"] <= 0
failed_comps.loc[warranty_mask, "cumulative_oow_failures"] += 1
failed_comps["state"] = 0
# update time to detection times for component levels with only static monitoring
# which will have None for monitor times
try:
if failed_comps["monitor_times"].isnull().any():
# monitor and time to detection will be the time to next static monitoring
indep_monitors = list(self.case.config[self.level][ck.INDEP_MONITOR].keys())
# next static monitoring is the min of the possible static monitors for this component level
failed_comps["monitor_times"] = np.amin(self.indep_monitoring.indep_monitoring[indep_monitors])
# in order to calculate the time to detection for component levels only monitored by an
# independent monitoring with a threshold (no interval), we need to instead
# set the nans that will be there to the day in the simulation when these components failed
# so it can be calculated later
failed_comps["monitor_times"] = failed_comps["monitor_times"].fillna(day)
failed_comps["time_to_detection"] = None # failed_comps["monitor_times"].copy()
# fails with KeyError if no monitoring is defined; faster than checking whether the column exists first
except KeyError:
pass
df.loc[mask] = failed_comps
else:
# check when the last failure was for fractional failure modes, and update components with new failures
# if it has been longer than the mean time of the distribution
# this is so that if repairs aren't occurring due to poor monitoring, failures still occur
fail = self.case.config[self.level][ck.PARTIAL_FAIL][self.mode]
if fail.get(ck.FRAC, None):
# extract mean, since some distributions might not have mean defined in params
if not self.mean:
self.mean = sample(fail[ck.DIST], fail[ck.PARAM], 10000).mean()
if day > (self.mean + self.last_failure_day):
# fail new fraction of components
counts = self.df[f"time_to_failure_{self.mode}"].isna()
update_df = self.df.loc[counts].copy()
sample_ = np.random.random_sample(size=len(update_df))
# here the full failure fraction is applied to the not-yet-failed components
defective = sample_ < fail[ck.FRAC]
sample_ = sample(fail[ck.DIST], fail[ck.PARAM], len(update_df))
# only give a possible failure time if the module is defective, otherwise it is set to numpy max float value (which won't be used)
failure_times = np.where(
list(defective),
sample_,
np.nan,
)
update_df[f"time_to_failure_{self.mode}"] = failure_times
self.df.loc[counts] = update_df
self.last_failure_day = day
| 47.349896
| 191
| 0.608264
| 2,911
| 22,870
| 4.642047
| 0.106493
| 0.018649
| 0.035521
| 0.016577
| 0.842818
| 0.806483
| 0.794716
| 0.776512
| 0.750685
| 0.716273
| 0
| 0.004239
| 0.30892
| 22,870
| 482
| 192
| 47.448133
| 0.850743
| 0.323874
| 0
| 0.73741
| 0
| 0
| 0.053411
| 0.025045
| 0
| 0
| 0
| 0.004149
| 0
| 1
| 0.039568
| false
| 0.017986
| 0.02518
| 0
| 0.086331
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7cb6e43fb530eaa5b3c694820f5942182fb655c4
| 32,823
|
py
|
Python
|
octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from octavia_lib.api.drivers import data_models as driver_dm
from octavia_lib.api.drivers import exceptions
from oslo_utils import uuidutils
from octavia.api.drivers.amphora_driver.v1 import driver
from octavia.common import constants as consts
from octavia.network import base as network_base
from octavia.tests.common import sample_data_models
from octavia.tests.unit import base
class TestAmphoraDriver(base.TestRpc):
def setUp(self):
super(TestAmphoraDriver, self).setUp()
self.amp_driver = driver.AmphoraProviderDriver()
self.sample_data = sample_data_models.SampleDriverDataModels()
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip
provider_vip_dict = self.amp_driver.create_vip_port(
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict)
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port_failed(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.side_effect = (
network_base.AllocateVIPException())
self.assertRaises(exceptions.DriverError,
self.amp_driver.create_vip_port,
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
# Load Balancer
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_create(self, mock_cast):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_create(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.FLAVOR: None,
consts.AVAILABILITY_ZONE: None}
mock_cast.assert_called_with({}, 'create_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_delete(self, mock_cast):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_delete(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
'cascade': False}
mock_cast.assert_called_with({}, 'delete_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_failover(self, mock_cast):
self.amp_driver.loadbalancer_failover(self.sample_data.lb_id)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id}
mock_cast.assert_called_with({}, 'failover_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update(self, mock_cast):
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id, admin_state_up=True)
lb_dict = {'enabled': True}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update_name(self, mock_cast):
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id, name='Great LB')
lb_dict = {'name': 'Great LB'}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update_qos(self, mock_cast):
qos_policy_id = uuidutils.generate_uuid()
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id,
vip_qos_policy_id=qos_policy_id)
lb_dict = {'vip': {'qos_policy_id': qos_policy_id}}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
# Listener
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_create(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
self.amp_driver.listener_create(provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
mock_cast.assert_called_with({}, 'create_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_delete(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
self.amp_driver.listener_delete(provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
mock_cast.assert_called_with({}, 'delete_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id, admin_state_up=False)
listener_dict = {'enabled': False}
self.amp_driver.listener_update(old_provider_listener,
provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
consts.LISTENER_UPDATES: listener_dict}
mock_cast.assert_called_with({}, 'update_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update_name(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id, name='Great Listener')
listener_dict = {'name': 'Great Listener'}
self.amp_driver.listener_update(old_provider_listener,
provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
consts.LISTENER_UPDATES: listener_dict}
mock_cast.assert_called_with({}, 'update_listener', **payload)
# Pool
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id,
lb_algorithm=consts.LB_ALGORITHM_ROUND_ROBIN)
self.amp_driver.pool_create(provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id}
mock_cast.assert_called_with({}, 'create_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create_unsupported_algorithm(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool.lb_algorithm = 'foo'
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_create,
provider_pool)
mock_cast.assert_not_called()
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_delete(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
self.amp_driver.pool_delete(provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id}
mock_cast.assert_called_with({}, 'delete_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id, admin_state_up=True)
pool_dict = {'enabled': True}
self.amp_driver.pool_update(old_provider_pool, provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id,
consts.POOL_UPDATES: pool_dict}
mock_cast.assert_called_with({}, 'update_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_name(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id, name='Great pool',
admin_state_up=True, tls_enabled=True)
pool_dict = {'name': 'Great pool',
'enabled': True,
'tls_enabled': True}
self.amp_driver.pool_update(old_provider_pool, provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id,
consts.POOL_UPDATES: pool_dict}
mock_cast.assert_called_with({}, 'update_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_unsupported_algorithm(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool.lb_algorithm = 'foo'
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_update,
old_provider_pool,
provider_pool)
mock_cast.assert_not_called()
# Member
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create(self, mock_cast, mock_pool_get, mock_session):
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
self.amp_driver.member_create(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'create_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "192.0.1.1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool_get.return_value = mock_pool
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id,
address="192.0.2.1")
self.amp_driver.member_create(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'create_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "fe80::1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool_get.return_value = mock_pool
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id,
address="192.0.2.1")
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.member_create,
provider_member)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_delete(self, mock_cast):
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
self.amp_driver.member_delete(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'delete_member', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_update(self, mock_cast):
old_provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id, admin_state_up=True)
member_dict = {'enabled': True}
self.amp_driver.member_update(old_provider_member, provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id,
consts.MEMBER_UPDATES: member_dict}
mock_cast.assert_called_with({}, 'update_member', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_update_name(self, mock_cast):
old_provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id, name='Great member')
member_dict = {'name': 'Great member'}
self.amp_driver.member_update(old_provider_member, provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id,
consts.MEMBER_UPDATES: member_dict}
mock_cast.assert_called_with({}, 'update_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='192.0.2.17', monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'ip_address': '192.0.2.17',
'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'enabled': False,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_no_admin_addr(self, mock_cast,
mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id,
monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_clear_already_empty(
self, mock_cast, mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool_get.return_value = mock_pool
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, [])
mock_cast.assert_not_called()
# Health Monitor
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_create(self, mock_cast):
provider_HM = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
self.amp_driver.health_monitor_create(provider_HM)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
mock_cast.assert_called_with({}, 'create_health_monitor', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_delete(self, mock_cast):
provider_HM = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
self.amp_driver.health_monitor_delete(provider_HM)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
mock_cast.assert_called_with({}, 'delete_health_monitor', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "192.0.1.1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='192.0.2.17', monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'ip_address': '192.0.2.17',
'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'enabled': False,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "192.0.1.1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='fe80::1', monitor_address='fe80::2',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.member_batch_update,
self.sample_data.pool1_id, prov_members)
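# NOTE: mixing IPv4 and IPv6 member addresses in a UDP pool is rejected
# above; UDP pools are realized through keepalived/LVS, which requires a
# single, consistent address family across the VIP and its members.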
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_update(self, mock_cast):
old_provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True,
max_retries=1, max_retries_down=2)
hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2}
self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
consts.HEALTH_MONITOR_UPDATES: hm_dict}
mock_cast.assert_called_with({}, 'update_health_monitor', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_update_name(self, mock_cast):
old_provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id, name='Great HM')
hm_dict = {'name': 'Great HM'}
self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
consts.HEALTH_MONITOR_UPDATES: hm_dict}
mock_cast.assert_called_with({}, 'update_health_monitor', **payload)

# L7 Policy
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_create(self, mock_cast):
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.amp_driver.l7policy_create(provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
mock_cast.assert_called_with({}, 'create_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_delete(self, mock_cast):
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.amp_driver.l7policy_delete(provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
mock_cast.assert_called_with({}, 'delete_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_update(self, mock_cast):
old_provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True)
l7policy_dict = {'enabled': True}
self.amp_driver.l7policy_update(old_provider_l7policy,
provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
consts.L7POLICY_UPDATES: l7policy_dict}
mock_cast.assert_called_with({}, 'update_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_update_name(self, mock_cast):
old_provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy')
l7policy_dict = {'name': 'Great L7Policy'}
self.amp_driver.l7policy_update(old_provider_l7policy,
provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
consts.L7POLICY_UPDATES: l7policy_dict}
mock_cast.assert_called_with({}, 'update_l7policy', **payload)

# L7 Rules
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_create(self, mock_cast):
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
self.amp_driver.l7rule_create(provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
mock_cast.assert_called_with({}, 'create_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_delete(self, mock_cast):
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
self.amp_driver.l7rule_delete(provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
mock_cast.assert_called_with({}, 'delete_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_update(self, mock_cast):
old_provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True)
l7rule_dict = {'enabled': True}
self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
consts.L7RULE_UPDATES: l7rule_dict}
mock_cast.assert_called_with({}, 'update_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_update_invert(self, mock_cast):
old_provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id, invert=True)
l7rule_dict = {'invert': True}
self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
consts.L7RULE_UPDATES: l7rule_dict}
mock_cast.assert_called_with({}, 'update_l7rule', **payload)

# Flavor
def test_get_supported_flavor_metadata(self):
test_schema = {
"properties": {
"test_name": {"description": "Test description"},
"test_name2": {"description": "Another description"}}}
ref_dict = {"test_name": "Test description",
"test_name2": "Another description"}
# mock out the supported_flavor_metadata
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', test_schema):
result = self.amp_driver.get_supported_flavor_metadata()
self.assertEqual(ref_dict, result)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.get_supported_flavor_metadata)
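# The assertions above imply the supported-metadata dict is simply the
# schema's "properties" flattened to {name: description}; a minimal sketch
# of that derivation (illustrative only, not necessarily the driver's code):
#   {name: prop.get('description', '')
#    for name, prop in test_schema['properties'].items()}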
def test_validate_flavor(self):
ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE}
self.amp_driver.validate_flavor(ref_dict)
# Test that a bad flavor metadata value is rejected
ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_flavor,
ref_dict)
# Test bad flavor metadata key
ref_dict = {'bogus': 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_flavor,
ref_dict)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.validate_flavor, 'bogus')
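# Both failure modes asserted above match a jsonschema-style check; a
# minimal sketch of the idea, assuming the jsonschema library (illustrative
# only, not necessarily Octavia's exact implementation):
#
#   import jsonschema
#   from jsonschema import exceptions as js_exceptions
#   try:
#       jsonschema.validate(instance=flavor_dict,
#                           schema=flavor_schema.SUPPORTED_FLAVOR_SCHEMA)
#   except js_exceptions.ValidationError:
#       raise exceptions.UnsupportedOptionError(...)  # bad key or value
#   except js_exceptions.SchemaError:
#       raise exceptions.DriverError(...)  # the schema itself is invalid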

# Availability Zone
def test_get_supported_availability_zone_metadata(self):
test_schema = {
"properties": {
"test_name": {"description": "Test description"},
"test_name2": {"description": "Another description"}}}
ref_dict = {"test_name": "Test description",
"test_name2": "Another description"}
# mock out the supported_availability_zone_metadata
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', test_schema):
result = self.amp_driver.get_supported_availability_zone_metadata()
self.assertEqual(ref_dict, result)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
self.assertRaises(
exceptions.DriverError,
self.amp_driver.get_supported_availability_zone_metadata)
def test_validate_availability_zone(self):
ref_dict = {consts.COMPUTE_ZONE: 'my_compute_zone'}
self.amp_driver.validate_availability_zone(ref_dict)
# Test bad availability zone metadata key
ref_dict = {'bogus': 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_availability_zone,
ref_dict)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.validate_availability_zone,
'bogus')
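
The batch-update payloads asserted throughout these tests all follow one bookkeeping rule: members present in the database but absent from the request are deleted (old_member_ids), members present in the request but absent from the database are created (new_member_ids), and the intersection is carried along as updated member dicts. Below is a minimal, self-contained sketch of that set arithmetic; diff_member_ids is a hypothetical helper for illustration, not Octavia's actual implementation.

def diff_member_ids(db_member_ids, requested_member_ids):
    """Split member ids into the delete/create/update sets an RPC payload carries."""
    db_ids = set(db_member_ids)
    req_ids = set(requested_member_ids)
    return {
        'old_member_ids': sorted(db_ids - req_ids),      # to delete
        'new_member_ids': sorted(req_ids - db_ids),      # to create
        'updated_member_ids': sorted(db_ids & req_ids),  # to update
    }

# Mirrors the asserted payloads: the DB holds member1 and member2, while the
# request carries member2 (changed) and member3 (new).
print(diff_member_ids({'member1', 'member2'}, {'member2', 'member3'}))
# -> {'old_member_ids': ['member1'], 'new_member_ids': ['member3'],
#     'updated_member_ids': ['member2']}
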
7cd05f2e63c93f4caf445b368e9eaf6372595673 | 75,027 | py | Python | post_optimization_studies/mad_analyses/ma100MeV_L2TeV_deta2_1/Output/Histos/MadAnalysis5job_0/selection_9.py | sheride/axion_pheno | 7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5 | ["MIT"]
def selection_9():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(500.0,4000.0,401,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([504.375,513.125,521.875,530.625,539.375,548.125,556.875,565.625,574.375,583.125,591.875,600.625,609.375,618.125,626.875,635.625,644.375,653.125,661.875,670.625,679.375,688.125,696.875,705.625,714.375,723.125,731.875,740.625,749.375,758.125,766.875,775.625,784.375,793.125,801.875,810.625,819.375,828.125,836.875,845.625,854.375,863.125,871.875,880.625,889.375,898.125,906.875,915.625,924.375,933.125,941.875,950.625,959.375,968.125,976.875,985.625,994.375,1003.125,1011.875,1020.625,1029.375,1038.125,1046.875,1055.625,1064.375,1073.125,1081.875,1090.625,1099.375,1108.125,1116.875,1125.625,1134.375,1143.125,1151.875,1160.625,1169.375,1178.125,1186.875,1195.625,1204.375,1213.125,1221.875,1230.625,1239.375,1248.125,1256.875,1265.625,1274.375,1283.125,1291.875,1300.625,1309.375,1318.125,1326.875,1335.625,1344.375,1353.125,1361.875,1370.625,1379.375,1388.125,1396.875,1405.625,1414.375,1423.125,1431.875,1440.625,1449.375,1458.125,1466.875,1475.625,1484.375,1493.125,1501.875,1510.625,1519.375,1528.125,1536.875,1545.625,1554.375,1563.125,1571.875,1580.625,1589.375,1598.125,1606.875,1615.625,1624.375,1633.125,1641.875,1650.625,1659.375,1668.125,1676.875,1685.625,1694.375,1703.125,1711.875,1720.625,1729.375,1738.125,1746.875,1755.625,1764.375,1773.125,1781.875,1790.625,1799.375,1808.125,1816.875,1825.625,1834.375,1843.125,1851.875,1860.625,1869.375,1878.125,1886.875,1895.625,1904.375,1913.125,1921.875,1930.625,1939.375,1948.125,1956.875,1965.625,1974.375,1983.125,1991.875,2000.625,2009.375,2018.125,2026.875,2035.625,2044.375,2053.125,2061.875,2070.625,2079.375,2088.125,2096.875,2105.625,2114.375,2123.125,2131.875,2140.625,2149.375,2158.125,2166.875,2175.625,2184.375,2193.125,2201.875,2210.625,2219.375,2228.125,2236.875,2245.625,2254.375,2263.125,2271.875,2280.625,2289.375,2298.125,2306.875,2315.625,2324.375,2333.125,2341.875,2350.625,2359.375,2368.125,2376.875,2385.625,2394.375,2403.125,2411.875,2420.625,2429.375,2438.125,2446.875,2455.625,2464.375,2473.125,2481.875,2490.625,2499.375,2508.125,2516.875,2525.625,2534.375,2543.125,2551.875,2560.625,2569.375,2578.125,2586.875,2595.625,2604.375,2613.125,2621.875,2630.625,2639.375,2648.125,2656.875,2665.625,2674.375,2683.125,2691.875,2700.625,2709.375,2718.125,2726.875,2735.625,2744.375,2753.125,2761.875,2770.625,2779.375,2788.125,2796.875,2805.625,2814.375,2823.125,2831.875,2840.625,2849.375,2858.125,2866.875,2875.625,2884.375,2893.125,2901.875,2910.625,2919.375,2928.125,2936.875,2945.625,2954.375,2963.125,2971.875,2980.625,2989.375,2998.125,3006.875,3015.625,3024.375,3033.125,3041.875,3050.625,3059.375,3068.125,3076.875,3085.625,3094.375,3103.125,3111.875,3120.625,3129.375,3138.125,3146.875,3155.625,3164.375,3173.125,3181.875,3190.625,3199.375,3208.125,3216.875,3225.625,3234.375,3243.125,3251.875,3260.625,3269.375,3278.125,3286.875,3295.625,3304.375,3313.125,3321.875,3330.625,3339.375,3348.125,3356.875,3365.625,3374.375,3383.125,3391.875,3400.625,3409.375,3418.125,3426.875,3435.625,3444.375,3453.125,3461.875,3470.625,3479.375,3488.125,3496.875,3505.625,3514.375,3523.125,3531.875,3540.625,3549.375,3558.125,3566.875,3575.625,3584.375,3593.125,3601.875,3610.625,3619.375,3628.125,3636.875,3645.625,3654.375,3663.125,3671.875,3680.625,3689.375,3698.125,3706.875,3715.625,3724.375,3733.125,3741.875,3750.625,3759.375,3768.125,3776.875,3785.625,3794.375,3803.125,3811.875,3820.625,3829.375,3838.125,3846.875,3855.625,3864.375,3873.125,3881.875,3890.625,3899.375,3908.125,3916.875,3925.625,3934.375,3943.125,3951.875,3960.625,3969.375,3978.125,3986.875,
3995.625])
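# Note: xData holds the centers of the 400 uniform bins defined by xBinning
# (width (4000 - 500) / 400 = 8.75, hence 504.375, 513.125, ...); it could
# be derived directly (a sketch, not part of the generated output):
#   xData = 0.5 * (xBinning[:-1] + xBinning[1:])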
# Creating weights for histo: y10_M_0
y10_M_0_weights = numpy.array([0.302422000731,0.281076200776,0.305633179241,0.284245253152,0.313115404225,0.286374021819,0.347284695281,0.335515236835,0.310965492555,0.302413087896,0.320573648488,0.36008928158,0.327030577715,0.370814059852,0.310927363208,0.351606381046,0.396443695999,0.353709410405,0.33551008098,0.345131586181,0.370788160673,0.328068063673,0.401787400156,0.360137962445,0.351575286044,0.345148812333,0.39114223788,0.366504484351,0.349411025908,0.363290588026,0.320544951558,0.36864991962,0.365432945769,0.340877086405,0.348358552,0.324860322399,0.335540016914,0.351563135811,0.39220162623,0.377214994094,0.351568291667,0.35693325873,0.370771094392,0.303438423621,0.307735489179,0.354761444667,0.319470775096,0.332301020767,0.3376815753,0.316293449371,0.318430531356,0.308820656804,0.351561696968,0.344098176947,0.338746439248,0.30884156,0.3088426791,0.30881342262,0.292752574054,0.313054133481,0.3034731557,0.31844883664,0.324837380842,0.339811303196,0.301333995386,0.307791484164,0.274618032545,0.261810688463,0.32056761334,0.23828699933,0.283191220497,0.275723783624,0.274615074923,0.289592074803,0.285316152248,0.266085292078,0.263957642511,0.290636555137,0.263943174142,0.273570394749,0.247865259296,0.266065108304,0.260735392901,0.294938456807,0.271420363175,0.263900768233,0.230833910923,0.23402918222,0.259614973615,0.253239578841,0.263914677052,0.223343132814,0.208365813191,0.235101719999,0.215851875091,0.221189703971,0.226525774265,0.232961120843,0.224418628215,0.191282626491,0.215856071718,0.216920056372,0.205154994392,0.212655005079,0.22012527967,0.190208130286,0.217968933173,0.185954589739,0.204078619698,0.185941320406,0.226548915662,0.181663079715,0.220151938239,0.206255669551,0.193417989856,0.222270794874,0.185926852037,0.18485935021,0.197698508716,0.190220080679,0.194491167121,0.180584866499,0.198745267219,0.194465947396,0.150666518079,0.198738472681,0.168835471923,0.181674270719,0.189137351093,0.165626291807,0.154948555717,0.167741231591,0.148537749412,0.17632629,0.184850357439,0.164534889193,0.16671393744,0.156004227055,0.164550876341,0.162431779898,0.150637261598,0.154941441437,0.161354086264,0.14854378456,0.168847102573,0.139993058551,0.156019254974,0.158149302613,0.132501840796,0.153888607817,0.142107878432,0.165643957605,0.163489089919,0.131440813764,0.128222840716,0.119688221759,0.110063838871,0.124997233805,0.134619298557,0.132505997455,0.132504358772,0.130375470201,0.1111258651,0.135726688318,0.114341719851,0.136757339769,0.118619081249,0.123953073213,0.127189431481,0.115413857951,0.12715849635,0.0993840244523,0.116468729932,0.116483518044,0.112215389224,0.115413937887,0.113247159776,0.0865493823155,0.126074687632,0.0951080219622,0.0940406400383,0.102594323669,0.0886953371666,0.104742476753,0.105803383882,0.104707944514,0.0983076497579,0.0886871437533,0.0886833867736,0.102578656264,0.0769412263826,0.0812045590582,0.0940196968747,0.100417793398,0.100448368818,0.0918890896855,0.0908337380904,0.089753646384,0.0716115509479,0.0961684894446,0.0790906984063,0.0726530736601,0.0812112336924,0.0929610679143,0.0673242375506,0.0822770968372,0.067317083302,0.0790863419085,0.0737488327713,0.079057085428,0.0576965373296,0.0908203488541,0.0544940718152,0.0726658633784,0.07373084723,0.0673148850692,0.0609093545223,0.0673231184503,0.073718896837,0.0780066498809,0.0694536457038,0.0694570429727,0.0673175229486,0.0726635852098,0.0694739094136,0.0598490868792,0.0587709535985,0.0480770305218,0.0726437211787,0.055574922911,0.0694588814947,0.0544914339358,0.0438140175893,0.05555993496,0.0459487015012,0.0673164438161,0.0491641166053,0.0555677286945,0.0502150717348,0.0544993475739,0.0715876501619,0.0480889809148,0.0480777899113,0.0459431059994,0.0459516591235,0.0630604652285,0.0427432383965,0.0555509421893,0.0534346834653,0.051304396019,0.041669621485,0.0384645940302,0.0512850116023,0.043805824176,0.051292445626,0.0438092214449,0.0459479421116,0.0470261553281,0.0480889809148,0.0438080224088,0.0523627851722,0.0416852888899,0.0480915788263,0.0405895297787,0.045950579991,0.0438080224088,0.0427316477144,0.040597723192,0.0459426663529,0.0416826510105,0.0406034385973,0.0438180943119,0.0395479870825,0.0523575493813,0.0470182816578,0.0267164064845,0.0331349905376,0.0352685633427,0.0352677959596,0.0331170169867,0.0309879405668,0.0309846472144,0.0363381434994,0.0427503926452,0.030991333839,0.0395442580803,0.0320564256039,0.0342027081915,0.0331230441414,0.0224436693692,0.0267081850937,0.036318639179,0.0267148717183,0.0395360366895,0.0299180246801,0.0288510744092,0.0352730557313,0.0235095245205,0.0309902387193,0.035254318794,0.0320582881066,0.029909471556,0.0320560898738,0.0288454829043,0.019242382907,0.0235045964821,0.0235068946346,0.0245764747913,0.0192394212878,0.0331162536003,0.0331308298824,0.0224384135944,0.0288518417923,0.0245775739077,0.0309849789477,0.0181656764792,0.0288558985311,0.021368829441,0.0245813029099,0.0235087571374,0.0288476811371,0.0224384135944,0.0320587197596,0.0181735661366,0.0181611840906,0.0203045090559,0.0181690737481,0.0213841731061,0.0181634822431,0.0224350163255,0.0138971040159,0.0203074706751,0.0235106236369,0.0256415625594,0.0224429059829,0.0256370701709,0.0170965279755,0.0171002569777,0.0192356922856,0.0181690737481,0.0256397000567,0.0192356922856,0.0160280469353,0.0181664438623,0.0170949972061,0.0106801381584,0.00748071419888,0.00855106173862,0.0149685506721,0.0171021194805,0.0203029782865,0.0128193024685,0.0170949972061,0.00855029435553,0.0160261804358,0.0138862527394,0.0128248939735,0.0106820006612,0.0138870201225,0.014955832896,0.0149584627818,0.0160209246609,0.0192389856381,0.0128278555926,0.00961351962092,0.0053441797746,0.0149629591672,0.00640926754272,0.00854919523912,0.0138854893531,0.0106864970465,0.0096097906187,0.0160283786686,0.00427196973214,0.0138918442443,0.0106849622803,0.0149595618982,0.0106883595492,0.0160344018265,0.00961275223783,0.0160288103216,0.00640926754272,0.0138873518558,0.00748147758519,0.00854470285058,0.00748334408469,0.00854843185281,0.0117489549288,0.00961877939255,0.0085473327364,0.00748334408469,0.0192285700112,0.00320874446674,0.0117560772031,0.00641189742853,0.0128219323543,0.00748147758519,0.00747961508246,0.00427383223486,0.00533815661666])
# Creating weights for histo: y10_M_1
y10_M_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_2
y10_M_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0521138287,0.0,0.0,0.0,0.0,0.0,1.05462838872,0.0,0.0,0.0,1.0529581672,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_3
y10_M_3_weights = numpy.array([0.921538864215,1.61075598599,0.691665206503,0.9215769054,0.460892777767,0.4606068925,0.460633021798,0.690728394295,1.15237623194,0.690988918773,0.461101043648,0.460768663599,0.688950449223,1.15134988846,0.229943169332,0.691884231506,0.691524569395,0.0,0.0,0.921347505528,0.691859639225,0.230489002696,0.461214398693,0.230455995243,0.230176949704,0.921662978384,0.230010874956,0.460609198026,0.230010874956,0.230754714608,0.0,0.0,0.230350863242,0.690526660739,0.0,0.0,0.0,0.230829221534,0.0,0.0,0.230587832925,0.0,0.230350863242,0.0,0.0,0.0,0.691036566317,0.0,0.230455995243,0.0,0.229943169332,0.230663838444,0.0,0.0,0.0,0.0,0.230588947263,0.0,0.0,0.0,0.0,0.0,0.230742917998,0.0,0.461232074395,0.229973218025,0.0,0.0,0.230176949704,0.0,0.229973218025,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.23041895312,0.230176949704,0.0,0.230541952951,0.0,0.0,0.0,0.229694018784,0.0,0.460062404026,0.0,0.0,0.0,0.0,0.229943169332,0.0,0.0,0.0,0.0,0.0,0.230663838444,0.230119234694,0.0,0.0,0.0,0.0,0.230513210722,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230610350233,0.23063590315,0.0,0.0,0.0,0.230119234694,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230010874956,0.0,0.230176949704,0.0,0.230559782355,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230010874956,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_4
y10_M_4_weights = numpy.array([1.41203635418,1.32912565106,1.32935608573,1.3847581208,1.05219203081,0.747902083288,1.05254941614,0.885974384635,1.1912053058,1.02465335623,0.692103808298,0.747588168942,1.07992343956,0.664885972649,0.664565133722,0.692136892408,0.775193781135,0.60917925601,0.719921389615,0.443169731971,0.36001497583,0.526155066693,0.47092422266,0.387698451091,0.526108133421,0.470529136835,0.636864500887,0.359810085167,0.525826149088,0.526287403133,0.415506030236,0.304456868396,0.38748225028,0.41526905568,0.359925110154,0.166157056088,0.387721917728,0.276932696971,0.304658296768,0.33231234256,0.13850501073,0.166118239963,0.110698662623,0.27683371393,0.276778124932,0.166364216474,0.138430379133,0.166168904815,0.221759980949,0.193776594383,0.110763599806,0.221502732759,0.110903168586,0.387399540005,0.193844686098,0.110702778902,0.138378521714,0.110915671302,0.166112623358,0.0553624495458,0.193734508318,0.166033413844,0.138622382381,0.19395040137,0.138498701668,0.0554021889476,0.0831243264542,0.0554910544059,0.221589059204,0.249432107592,0.0829624066647,0.027663897932,0.110827421363,0.193800791947,0.0831705287985,0.0276946276844,0.0,0.0829388630888,0.027663897932,0.0276887610254,0.138310968578,0.0831810310799,0.0276421970641,0.0553722208992,0.0830706994201,0.0553250952774,0.110807570897,0.0,0.05526531306,0.0277280849524,0.0553242874096,0.0277280849524,0.0277196023405,0.0553441763455,0.0277196023405,0.0276970589818,0.0276713033869,0.0554044202016,0.0276713033869,0.055427771428,0.0,0.0553834925785,0.0554136145065,0.0277236224445,0.0831349441453,0.02772785798,0.055348754263,0.0552963967355,0.0,0.0554927470813,0.0276593854133,0.0276251125835,0.0276848024731,0.0,0.0,0.0553726055981,0.0,0.0276603702426,0.0276398350125,0.0276398350125,0.0276251125835,0.0,0.0552942424213,0.0276890149267,0.0553575253991,0.0,0.0276055006309,0.0276890149267,0.0,0.0276398350125,0.0277280849524,0.0276421970641,0.0,0.0553585256164,0.02772785798,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0276970589818,0.0,0.0277261537636,0.0,0.0277648852543,0.0,0.0,0.0,0.0,0.0,0.0554651641663,0.0,0.0,0.0,0.0,0.0,0.0553600644122,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0276925079932,0.0,0.0,0.0,0.0,0.0276890149267,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0276398350125,0.0276848024731,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0276055006309,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0277075497223,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0277280849524,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0276055006309,0.0,0.0,0.0276946276844,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0276946276844,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0276398350125,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_5
y10_M_5_weights = numpy.array([0.665165918974,0.605017117055,0.625168262825,0.645262668737,0.514155666617,0.5343296297,0.473963577839,0.554283369309,0.433342268716,0.372994543527,0.483952948618,0.443604051503,0.35289182386,0.413277173341,0.40320794197,0.433513095137,0.35293515248,0.322710041959,0.383046176439,0.463718422563,0.332767743306,0.25222560441,0.282161554045,0.262009862116,0.242002720287,0.151316647595,0.242195453754,0.27228796899,0.252113459748,0.221806000576,0.181502131241,0.272281779187,0.292434138644,0.181446301648,0.151356153101,0.171364144511,0.161332537431,0.171373307847,0.141221746747,0.110901483179,0.141047764741,0.110897113907,0.151250440979,0.121045477659,0.151329755413,0.070594033469,0.0403608701359,0.141278790028,0.171448010271,0.151245889653,0.100800331844,0.0806780718199,0.110916108106,0.100791168509,0.0503811474495,0.0806276431323,0.0503550653197,0.100800513897,0.0907425091286,0.0503429769989,0.0907638700167,0.0907341346895,0.110875388913,0.0705047061187,0.0706028326985,0.0603780188127,0.0503521949503,0.0705042813283,0.0805432312137,0.0403227178906,0.0504429969301,0.0604569387985,0.0806351679907,0.040356858901,0.0403787538113,0.0504427723981,0.0403403284864,0.0605297053921,0.0302851091291,0.0403535273306,0.0402523361916,0.10083437576,0.0906247208223,0.0604858002712,0.0302487410034,0.0100941969097,0.0201551327319,0.0403515490211,0.0,0.0302742344952,0.0302305842485,0.0302678201603,0.0,0.0201731741865,0.0403381499186,0.0201436512545,0.0503739442182,0.0403699181712,0.0100968063363,0.0402763186433,0.0100788134292,0.0302728873028,0.0302636693514,0.0201453261423,0.0201498835364,0.0100993368734,0.0403804286991,0.0100228199875,0.0100914782512,0.0100989849042,0.0201957912405,0.0302391771513,0.020185104728,0.0100993368734,0.0,0.0201527903163,0.0100946459738,0.0,0.0,0.0201646723103,0.0302550703802,0.0100946459738,0.0201426135523,0.0403196290576,0.020178842104,0.0,0.0101026381015,0.0201694663733,0.0,0.0201756197655,0.0201654247962,0.0,0.0100914782512,0.0100846027153,0.0101026381015,0.0301816726695,0.0,0.0100946459738,0.0,0.0302556529499,0.0,0.0100993368734,0.0100822602997,0.0100941969097,0.0201783262871,0.0100822602997,0.0100705300166,0.0100822602997,0.0302180954111,0.0,0.0100585691329,0.0,0.0,0.0100989849042,0.0,0.0,0.0,0.0,0.0,0.0,0.0100822602997,0.0100968063363,0.0,0.0201931818138,0.0,0.0100422511136,0.0,0.0,0.0,0.0,0.0100914782512,0.0100989849042,0.010072660037,0.0,0.0100119392851,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0200671404371,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100968063363,0.0201567044563,0.0,0.0,0.0,0.0,0.0100846027153,0.0,0.0,0.0,0.0,0.0100846027153,0.0,0.0100700263365,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100119392851,0.0100946459738,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100789894137,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100993368734,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100846027153,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100989849042,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100968063363,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100443143812,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100914782512,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_6
y10_M_6_weights = numpy.array([0.308375781966,0.33101189686,0.319673870988,0.390385413264,0.294220438246,0.282909921861,0.330964303522,0.248998456557,0.206458707663,0.229137029422,0.220690000655,0.243357664893,0.232045378667,0.288554099309,0.240460704192,0.226308284557,0.217835977841,0.198029107984,0.22635576247,0.206498336716,0.152754338858,0.183884498736,0.198067467368,0.169701607052,0.155590240038,0.16689129162,0.147117394675,0.099027346866,0.115982502396,0.115976231002,0.141457327131,0.110294502142,0.161281549126,0.121656228496,0.113172764086,0.115974422686,0.110350175189,0.104704651124,0.0820678436826,0.124457579298,0.096191830434,0.0905498075758,0.0764182412871,0.104639628697,0.0622550871807,0.0791757308075,0.0651171127557,0.11320000425,0.0481230591944,0.0707492091133,0.0905253760724,0.0679119997899,0.0509378761793,0.0622565107486,0.0565847468635,0.059394831447,0.0877037875212,0.053743728534,0.0480928564698,0.04527800101,0.0537538858834,0.0594129530818,0.0537510002727,0.0452760772696,0.0311206258198,0.0367866761959,0.0396330540663,0.0339631754467,0.0509289500238,0.0480907788301,0.0481022058482,0.0424545656679,0.0311151893294,0.0367982340284,0.0481046297611,0.0254669989593,0.0254628398325,0.0339716322096,0.0169817877924,0.0339525833319,0.0169725730757,0.0254620280141,0.0169643856365,0.0310984527877,0.028294424139,0.0396120468209,0.0198220904904,0.0169894173469,0.0283038889419,0.0254792724232,0.0339425798817,0.0226435059051,0.0169731617403,0.0198094476684,0.0283070438762,0.0283036965679,0.0339530527246,0.0254655138317,0.0282980869408,0.00848691175351,0.00282881642877,0.0226211097191,0.0282934507264,0.0141534623021,0.0226179240049,0.0113310811698,0.0113159067054,0.0113198349833,0.00848708489014,0.0141466830409,0.0169638508367,0.0198037995665,0.0141503150628,0.00849348709827,0.0113118745455,0.0141495494141,0.014148579849,0.0169642432797,0.011326217954,0.014132193428,0.0,0.00282588657211,0.00849419503474,0.0113245442999,0.00849842726367,0.0226387581137,0.00283357691682,0.0141485529166,0.00847838958344,0.00848143294078,0.0,0.00282350575096,0.0141561363013,0.00566206053849,0.0169700876031,0.00848052108782,0.0113103394006,0.0169517505094,0.0113228937306,0.0,0.00848620381703,0.00566186816445,0.00565694338897,0.00282360232273,0.00565915184298,0.0,0.00566409970334,0.00567251029646,0.00283014765714,0.00282881642877,0.00847679672637,0.00282711007102,0.00566506926851,0.00282881642877,0.00282145388943,0.0,0.00565811302315,0.00283625437874,0.00282933583868,0.0028301953659,0.0,0.00566358414091,0.00566701224634,0.00283625437874,0.00848428777157,0.0,0.00565428093223,0.0,0.00282360232273,0.00565950965869,0.00566306088351,0.00282803154268,0.0056600713909,0.0056623798794,0.00566508850592,0.00848482641889,0.0056494869711,0.00566304549359,0.00282933583868,0.00565032572192,0.0,0.0,0.0,0.00283100449112,0.0,0.00283443990677,0.00848893168095,0.0,0.00283105566262,0.00282803154268,0.00565367303025,0.0,0.00283200714463,0.00283105566262,0.00566515391309,0.0,0.00283100449112,0.00847689676087,0.0,0.00283204138721,0.0028254664272,0.00282145388943,0.0,0.0,0.00283357691682,0.0,0.00566321478275,0.00282750328356,0.00282516209146,0.0,0.00283443990677,0.0,0.00566272615268,0.0,0.0,0.00283304557971,0.0,0.0,0.00283253040203,0.00283100449112,0.0,0.00282145388943,0.0,0.0,0.00566303779863,0.0,0.00565367303025,0.0,0.0,0.0028254664272,0.00282881642877,0.0,0.00282588657211,0.0,0.00282750328356,0.0,0.0,0.0,0.00282360232273,0.00564943695385,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00283357691682,0.0,0.0,0.0,0.0,0.0,0.0,0.00283100449112,0.0,0.0,0.0,0.0,0.0,0.00283304557971,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00282360232273,0.00282145388943,0.0,0.0,0.0,0.00283253040203,0.00283204138721,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00282933583868,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00282588657211,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00283105566262,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_7
y10_M_7_weights = numpy.array([0.0517546980756,0.0638608638856,0.0533004556713,0.0396092673901,0.0472327267196,0.0425651692873,0.0380394404139,0.0487828917179,0.0259704716632,0.0410344890257,0.035057531297,0.0395601478934,0.042640071498,0.0410860426375,0.0365612826836,0.0259310768107,0.032019696614,0.0289537987112,0.0243771424789,0.0213551295439,0.0380879454738,0.0304741989723,0.0319980849807,0.0258920128087,0.0411845534011,0.0350930504712,0.0258562100482,0.0213569137739,0.0228745254371,0.0167280067802,0.021304332162,0.0197828093721,0.0137231862599,0.0152196353014,0.0198805520897,0.0182681162837,0.0228478683323,0.0121690865028,0.0121588065023,0.0152873296956,0.0137177035929,0.0182505930185,0.0182451221676,0.0167861774039,0.00913206476704,0.0152297026122,0.0137684300783,0.0106383093493,0.00917388546342,0.0182868093421,0.0167436867351,0.0106601124032,0.0151965584726,0.018274284284,0.00608114257998,0.00913198087279,0.0121415077428,0.00912742695072,0.0137104130638,0.0122298212186,0.0106822321285,0.0136925234997,0.00458698113401,0.00610711435141,0.0106296186132,0.00609434115536,0.00913913079039,0.00610993957914,0.00760176852844,0.0136674733835,0.0121878977222,0.0106333335927,0.0106629010011,0.00760096857897,0.00609562674623,0.0137456604679,0.00761672770161,0.0,0.00917287518751,0.00611583817254,0.00456609146398,0.00304679600755,0.00457579129436,0.00609176879201,0.00610059895798,0.00456793122959,0.00913266738776,0.00304471401204,0.00153182878581,0.00152058341053,0.00455065610228,0.00302924556528,0.00455527501286,0.00609288423115,0.00609874028662,0.00305861446332,0.00456792413994,0.00304150121647,0.00913510977409,0.0,0.0015241294199,0.00457865787842,0.00759761044547,0.00456501383634,0.00305174576872,0.00152459261073,0.00152459261073,0.0045804562877,0.00304597715233,0.00306027462432,0.00152156178299,0.0,0.0,0.00301892539004,0.00305242519404,0.00153116235819,0.00305445756196,0.00152268903822,0.0,0.0015222400267,0.00304312356598,0.00456986552395,0.0,0.00306792318103,0.00303287310569,0.0,0.00305066932269,0.0,0.00152126283585,0.00154503799568,0.00457481410351,0.00152288518536,0.00305726742877,0.00151845296904,0.0,0.0,0.00151412237112,0.0,0.0,0.0,0.00303452972187,0.00305587076663,0.0,0.0,0.0,0.00151228969516,0.0,0.0015222400267,0.00305595820571,0.00304821512026,0.0,0.0,0.0,0.0,0.00151228969516,0.0,0.00151845296904,0.0,0.0,0.0,0.0,0.0,0.00307129076741,0.00305066932269,0.0015241294199,0.0,0.0015359254251,0.00152772033043,0.0,0.0,0.0,0.00304872203063,0.0,0.0,0.00305987996683,0.00457084744124,0.00152459261073,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0015081327938,0.0015222400267,0.0,0.00304327244875,0.00153296667552,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00152058341053,0.0,0.0,0.0,0.0,0.0,0.0,0.00153296667552,0.0,0.00152288518536,0.0015081327938,0.0,0.0,0.00153296667552,0.0,0.0,0.0015241294199,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0015081327938,0.0,0.00152459261073,0.00152058341053,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00152607671195,0.0,0.00152126283585,0.0,0.00153296667552,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00152058341053,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00153784435853,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0015359254251,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_8
y10_M_8_weights = numpy.array([0.0111950935104,0.0111935530021,0.0128185697611,0.0129989632858,0.0113782060323,0.0106514211641,0.0113754177122,0.00903033655258,0.00866627592299,0.00830471476816,0.00866859438802,0.0108376108514,0.0101153011598,0.00758473894675,0.00939083475666,0.00974867943517,0.00957244913421,0.00812334687194,0.00740307450268,0.00613963045231,0.0081255420963,0.00776554840869,0.00722311232031,0.00650041365044,0.00523534436378,0.00451246853545,0.00668138486576,0.00469764918977,0.0045166048003,0.00559550365604,0.00523304515511,0.00361144485843,0.00596155539263,0.00361138092733,0.0057774993094,0.00631888400085,0.00415212823347,0.006139341607,0.0032518647973,0.00541842222424,0.00631842184835,0.0039711108029,0.00487486926732,0.00469313164911,0.00343106288754,0.00487541999904,0.0039696319149,0.00451455207296,0.00324969537645,0.00325102984179,0.00396926604418,0.0034310409353,0.00270977147202,0.00216697259393,0.00451544556779,0.00270994285357,0.00216650813067,0.00198618970575,0.00216537893807,0.00198615658482,0.00270824598365,0.00180359903105,0.00324915427291,0.0021664915702,0.00180544186414,0.00252748158157,0.00325113613686,0.00144400202433,0.00162424650485,0.0025288329925,0.00288841497926,0.00234820608079,0.00216599899267,0.00126339167309,0.00144326566136,0.00252424304795,0.00144359533014,0.00162463432782,0.00234566077591,0.00108473930792,0.0016244675678,0.00180528049589,0.000722272334196,0.00108327158862,0.00144545433856,0.000721967313548,0.00180378196642,0.00144578708835,0.00108417471162,0.0014438421966,0.00108347455059,0.00108218745588,0.00144394772142,0.000722701750891,0.00162664083992,0.00144466790906,0.000541054637543,0.00144378057626,0.000180724117786,0.00162500366469,0.000902267252367,0.000541202141215,0.000722166809376,0.000541387002214,0.000361928720118,0.0012639905457,0.00180546920816,0.00126489405383,0.000541636564562,0.00126440763833,0.00108362783117,0.00144573856234,0.00108426444623,0.000541985874825,0.000722744499997,0.000361529766975,0.00108399870855,0.000180614086979,0.00108283369913,0.00090256765149,0.000542298983142,0.00108364169574,0.000541450933309,0.000903724958368,0.000541177878209,0.000361258714535,0.000181235605062,0.000541786764124,0.000722590064038,0.000361299730569,0.000361138092733,0.000360835459873,0.000361302927124,0.000541864559794,0.0,0.000903733046037,0.000541557228384,0.000361421353701,0.000721582956721,0.000540881330357,0.000360758549995,0.000540491581751,0.000360887490542,0.0,0.000180408698707,0.000360794443839,0.000360750077199,0.000360453837449,0.000180065434939,0.0,0.00054089018828,0.000360937017884,0.0,0.000901484674138,0.00036150908565,0.000721957685371,0.000360879518411,0.0,0.000180707749885,0.0,0.000361487017869,0.000361148683728,0.0,0.000180183977055,0.000180707749885,0.000180619979423,0.000361657321064,0.00018057029803,0.000360491233288,0.000181037341641,0.000180221372894,0.0,0.0,0.0,0.000180916719839,0.000180833994542,0.000360753735907,0.000180822055602,0.000361635330307,0.0,0.000361554376595,0.0,0.000541311902433,0.000180679751146,0.000180707749885,0.000180468932582,0.0,0.000180269821881,0.000541762886245,0.0,0.0,0.0,0.000361230754309,0.0,0.000180724117786,0.000361717092787,0.000180553968641,0.0,0.000360631958724,0.000180822055602,0.000180916719839,0.000180070364566,0.000180833994542,0.000180374769011,0.000180269821881,0.000180070364566,0.0,0.0,0.0,0.0,0.000180801335766,0.000361624431211,0.0,0.000180410585829,0.0,0.0,0.000180614086979,0.0,0.0,0.000180679751146,0.0,0.0,0.0,0.0,0.0,0.000180468932582,0.0,0.00018057029803,0.0,0.000180600761582,0.0,0.000180600761582,0.000180065434939,0.0,0.0,0.000180600761582,0.0,0.0,0.0,0.0,0.000180619979423,0.0,0.0,0.00018057029803,0.0,0.0,0.0,0.0,0.000180374769011,0.0,0.00018057029803,0.0,0.0,0.0,0.0,0.0,0.0,0.00018057029803,0.000180693115056,0.0,0.0,0.0,0.0,0.000180065434939,0.0,0.000180619979423,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00018057029803,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180693115056,0.000360785316327,0.0,0.0,0.0,0.0,0.0,0.000360022726194,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180619979423,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180693115056,0.0,0.000180065434939,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180916719839,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180468932582,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180822055602,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180374769011,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_9
y10_M_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.012170493784,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121240822392,0.0,0.0,0.0,0.0,0.0,0.0121753353338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121313846429,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_10
y10_M_10_weights = numpy.array([0.0200670126467,0.0200880860334,0.0300606820052,0.0100299569039,0.0,0.0301341868043,0.030113266303,0.0200658432804,0.0100369731021,0.0200736734897,0.0100367003877,0.0501881711396,0.0200979574689,0.0100696699077,0.0301172991707,0.0100407291233,0.030109873901,0.0200786525938,0.0100546540867,0.0200734007753,0.0100609843668,0.0200665333305,0.0100602901846,0.0100457330197,0.0100187053682,0.0,0.0401615363932,0.0100309733849,0.0100696699077,0.0501817664829,0.0200368322513,0.0100407291233,0.0200939039409,0.020058128768,0.0200873422668,0.0201058537908,0.0201005978402,0.0301433227373,0.0100702897132,0.0,0.0100367003877,0.0,0.0100407291233,0.0100299569039,0.0,0.0,0.0100187053682,0.0201025894819,0.0,0.0100309733849,0.0,0.0,0.0,0.0200725371797,0.0200759006575,0.0200719504305,0.0,0.0100355640776,0.0,0.0100367003877,0.0200795285856,0.0100153625506,0.0,0.0100459437535,0.0,0.0100153625506,0.0,0.0100271595151,0.0,0.0,0.0,0.0,0.0100262835234,0.0,0.0,0.0100340930726,0.0,0.0,0.0,0.0,0.0100184161257,0.0,0.0,0.0,0.0,0.0,0.01003244439,0.0100407291233,0.01003244439,0.0100584803526,0.0,0.0,0.0,0.0100546540867,0.0,0.0200884372565,0.0100702897132,0.020117840829,0.0100568564622,0.0100369731021,0.0,0.0,0.0,0.0,0.0100568564622,0.01003244439,0.0100696699077,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100262835234,0.0,0.0,0.0,0.0,0.0100340930726,0.0,0.0,0.0,0.0,0.0,0.0,0.0100568564622,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_11
y10_M_11_weights = numpy.array([0.137487526503,0.225436956924,0.225540189391,0.18151066875,0.170629657968,0.203567721474,0.219924562572,0.115535128181,0.11002123064,0.137582593002,0.137643614318,0.126395338088,0.154092718671,0.159491195732,0.131945962174,0.0989663375441,0.0935105768237,0.11546996294,0.121014858659,0.137444665325,0.148518327961,0.104455412166,0.126566620293,0.0440433743482,0.0549482330088,0.0660160047715,0.12100299566,0.104505342391,0.0935035077764,0.132031278262,0.0659689590427,0.0605715789028,0.0989920948775,0.0824749814144,0.0550352147923,0.0605183172876,0.0275097745667,0.0659527489859,0.0440469088719,0.0549910129331,0.0989931511719,0.0605275395505,0.0660080013099,0.0660080419366,0.0549338511539,0.0384651444142,0.0495853043179,0.0549965787922,0.0439983193282,0.0439924690821,0.0494825593714,0.0495589375839,0.0219850418828,0.0274517189998,0.0439961254859,0.0274801008186,0.0329867749304,0.0550193297491,0.0219942885217,0.0329932345771,0.0330075311159,0.0385275307882,0.0219698718697,0.0715348181439,0.0220177220073,0.0440127418098,0.0164921605386,0.0274790932762,0.0440461775911,0.0164698686635,0.0274908465831,0.0165060995624,0.0,0.0165157118417,0.0219991596641,0.0165170931498,0.0164983114223,0.00549992195283,0.0110019118051,0.0220134318269,0.00550244080877,0.0274991994344,0.0109986372924,0.010996666897,0.0165295858627,0.0165119051191,0.00549610710487,0.0055164285846,0.00550898983423,0.00550255862623,0.0275031524131,0.0220072200031,0.0165161302968,0.0274854635441,0.0109992141917,0.0219741254862,0.0109926286022,0.00549610710487,0.00550255862623,0.0,0.00550898983423,0.0,0.022012208963,0.0,0.0165124088903,0.0109882409176,0.0110131735288,0.0109887040621,0.00548486975723,0.00550244080877,0.00549610710487,0.0110142663873,0.0109980441425,0.0110069779557,0.00549610710487,0.0,0.0110171549463,0.0,0.0164846852242,0.00549992195283,0.0,0.0165101500452,0.0,0.0,0.00549610710487,0.0,0.0,0.0,0.0,0.00549249945314,0.00550890451814,0.00549807750025,0.00550890451814,0.0,0.00550816511204,0.0054945551646,0.00549992195283,0.0,0.00549610710487,0.00549914598269,0.0,0.0,0.0,0.00548818895934,0.0,0.0110113453269,0.0,0.0,0.00549610710487,0.00549249945314,0.0,0.00549723246471,0.0,0.00548486975723,0.0,0.0,0.0,0.00550383430488,0.0,0.0,0.00549807750025,0.00550816511204,0.0,0.0,0.0,0.0,0.0,0.00549249945314,0.0,0.0055164285846,0.0,0.0,0.0,0.0,0.0,0.0,0.00549405139341,0.0,0.0,0.0,0.0,0.00548818895934,0.00550816511204,0.0,0.00550505716882,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00547500559234,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0109999414098,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00547500559234,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00550985518312,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00549723246471,0.00549723246471,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00549405139341,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00551434443444,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00549610710487,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_12
y10_M_12_weights = numpy.array([0.287187477131,0.249670249937,0.239839070091,0.25065532396,0.239793536071,0.212133101608,0.196399093347,0.206245664974,0.189486219346,0.191437127666,0.16779879822,0.166793362152,0.183573249991,0.151978171105,0.141116583629,0.143090058545,0.153953129083,0.148993488202,0.13123794579,0.119408920707,0.120410909657,0.119424753399,0.101639669989,0.10460940202,0.104616576835,0.0986663305091,0.102626467571,0.0897829877986,0.0710536346197,0.0789638479705,0.0720249201721,0.0838795180588,0.0602044327181,0.0671120157945,0.0641332250585,0.0582205763656,0.0483385715745,0.0542868538451,0.062171694806,0.0611931141904,0.0631571295743,0.0503329296113,0.0444200404218,0.0513246974564,0.0463814905088,0.0463790855429,0.0542891385627,0.0414531542668,0.0384981446351,0.0394741719786,0.0374869085911,0.0375144414422,0.0374797097266,0.0384822758686,0.0424369055591,0.0296066210324,0.0404447119916,0.0285909398098,0.031583695381,0.0355280398342,0.0266516073923,0.0217255879342,0.0246651536767,0.0148037073355,0.0335635995729,0.0315759473826,0.0138077187753,0.0167774628313,0.0167808778829,0.0177573220871,0.0167762603484,0.0187533667633,0.0138102439894,0.0157843081225,0.027626825064,0.0148021841905,0.0157773697959,0.0157842319652,0.0138180080209,0.0148077116037,0.0167724605023,0.0167813508595,0.0118321515385,0.0128331785023,0.011845583273,0.0138088410927,0.0078972626096,0.0138123002353,0.0128309739503,0.0147971137208,0.0108530819132,0.0108636797962,0.00591703374735,0.00690603588175,0.0128285649761,0.00296062923537,0.00789847711736,0.0108478230545,0.00592254512748,0.00789797207453,0.003945116848,0.00788825601238,0.00887970720371,0.00986891776849,0.00395092243562,0.00395206038531,0.00690734658816,0.00592249702816,0.00789871360568,0.00493565936305,0.0078992026154,0.0039457954492,0.00394420576676,0.00197342120551,0.00493947524224,0.00197377112804,0.00296181328024,0.00691313854764,0.00690435641391,0.00690840477314,0.00295940390525,0.00493589985963,0.00197617609392,0.0039460187102,0.00296000033679,0.00493335460408,0.00295949769892,0.00394973077503,0.00394353117383,0.00197553236472,0.00493759936886,0.0,0.002960455677,0.00690601183209,0.00394858160217,0.00197276464982,0.00394803848071,0.0,0.00296280252287,0.00394306701541,0.0049304726533,0.000987656155171,0.00098421625231,0.00395142828011,0.000984733720802,0.00197588308891,0.000988891505977,0.0,0.00296327349535,0.0,0.00296172589981,0.00197414630272,0.00197086833423,0.0,0.00098421625231,0.00296106733999,0.00197383806626,0.000988006478534,0.0,0.000986198745849,0.000987730709113,0.001972599108,0.0,0.0,0.00197453229974,0.00197430503047,0.0,0.0,0.0,0.000983905610885,0.0,0.000988891505977,0.0,0.000983905610885,0.00098421625231,0.0,0.0,0.0,0.000986452469749,0.0,0.0,0.0,0.0,0.00197035086574,0.000985425549319,0.00098785897396,0.0,0.0,0.0,0.0,0.0,0.0,0.000988169615386,0.00197474754419,0.0,0.0,0.0,0.0,0.0,0.00296082083098,0.0,0.000986198745849,0.0,0.000986287328759,0.0,0.0,0.0,0.00098421625231,0.000987941544455,0.0,0.0,0.00098421625231,0.0,0.000988569641377,0.0,0.000984733720802,0.0,0.000983905610885,0.000988169615386,0.00098421625231,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000988569641377,0.0,0.0,0.0,0.00098785897396,0.00197732646926,0.000985425549319,0.0,0.0,0.000988169615386,0.0,0.0,0.0,0.000986198745849,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000988006478534,0.000986452469749,0.0,0.0,0.000987941544455,0.0,0.0,0.00098785897396,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000985425549319,0.000984733720802,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000987730709113,0.0,0.0,0.0,0.0,0.0,0.0,0.000988891505977,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00197422847239,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000986452469749,0.0,0.0,0.0,0.00098679116911,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000985764649508,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_13
y10_M_13_weights = numpy.array([0.131072421862,0.132596061066,0.130309141878,0.119231639497,0.113177813354,0.113942773779,0.0998193933573,0.0993250638396,0.0950275378587,0.0935144214119,0.0900009808667,0.0889849146505,0.0788893095335,0.0715967588358,0.0738613921851,0.0746107085111,0.0710831442653,0.0708300779603,0.0662950097417,0.0665433948202,0.0574825808256,0.0521858330418,0.0547052931574,0.0534439226698,0.0473880159812,0.0546972110398,0.0463792716834,0.0494040641995,0.0451223023489,0.0418424430035,0.0403350480558,0.0426048827679,0.0410845644342,0.0388160380649,0.0398231819434,0.0284889603006,0.0312584939372,0.029497836633,0.0274744865016,0.0297542158061,0.0310006103699,0.0287372813624,0.0267194646808,0.0239453418418,0.0246975989382,0.0224320533499,0.0221818557965,0.0269685299375,0.0219335227315,0.0194096454586,0.0221840683762,0.0161313705284,0.0224279082639,0.0176490841796,0.0201670118937,0.0186558119491,0.016891261625,0.0151225582127,0.0141169107262,0.0143680125166,0.0151250028532,0.0143664641109,0.0171451281397,0.0143722176184,0.0108378155811,0.0126065389986,0.0113434080498,0.0110925343191,0.0113432440068,0.00806468100106,0.0133589081244,0.0128575447638,0.0113448804355,0.00857025346447,0.00756016881545,0.0095807702267,0.00856994538375,0.00832463311027,0.00856967731351,0.00781423538261,0.007816007847,0.0052985282503,0.00655691395588,0.00756432190358,0.00731119558277,0.00504292928163,0.00932826006733,0.0063020671843,0.00529257869147,0.00655365710256,0.0063020591822,0.0057970508666,0.00478952288744,0.00756320561111,0.00605080535199,0.00478818653731,0.00579868329431,0.00554162394301,0.00453749685491,0.00428632704672,0.00302523303155,0.00226911692401,0.00403330875422,0.00453950138011,0.00428535879303,0.00428368635484,0.00126064269693,0.00226981350652,0.00302671421963,0.00252049798755,0.00226767254557,0.00201804234076,0.00403317271857,0.00503980046186,0.00252094530475,0.00126040183382,0.00428560685803,0.00252241849073,0.00252121737603,0.00176380292832,0.00277258163517,0.00252062762151,0.00277227155393,0.00201689163927,0.0027721783295,0.0030264961625,0.00176450831313,0.00252051839289,0.00176591948287,0.00126052266548,0.00100782245631,0.00126156213783,0.00176336201279,0.00176400738188,0.00176442669175,0.00176422063776,0.00151298001101,0.00151348974457,0.00100757999278,0.00176343603219,0.00277244519942,0.00126111201989,0.00126037902784,0.00227019960768,0.000754986211502,0.000503627153726,0.00227079176283,0.000755883646636,0.00176537974145,0.00252294222796,0.00126186581739,0.00151201295764,0.00100816774678,0.000503779593666,0.00176579064911,0.00126098198582,0.000757071957982,0.00025218191337,0.000755143852805,0.00125919751828,0.00176362128072,0.00100811853389,0.000755173060458,0.0,0.00176429505726,0.000252126138756,0.0010098425856,0.000251870151686,0.000252215802249,0.000504526589384,0.000757621702018,0.0007558972502,0.00075502662209,0.0017639837757,0.00126033101526,0.000505176759733,0.000755956465715,0.0,0.000504121683296,0.000252085128011,0.00126091236758,0.000504259719463,0.00100930244408,0.000252143103201,0.0,0.000251610003525,0.000251953493522,0.0,0.00100789887633,0.0,0.000252444982296,0.000755245879537,0.0,0.000503465511375,0.0,0.000756320161006,0.000252319949536,0.000252353758394,0.000253126320811,0.000755712001664,0.0,0.000251452402233,0.000504084873652,0.00025218191337,0.0,0.000504569400601,0.0,0.0,0.000504226110657,0.000252013189163,0.0,0.0,0.0,0.000252319949536,0.00125951000015,0.0,0.0,0.000252013189163,0.000252143103201,0.0,0.000252143103201,0.0,0.0,0.0,0.0,0.000503529928252,0.000252
081527068,0.000251934048427,0.0,0.000251452402233,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000252353758394,0.0,0.0,0.000251953493522,0.000252215802249,0.000252081527068,0.0,0.0,0.0,0.0,0.000251545666669,0.0,0.0,0.0,0.000252353758394,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000251953493522,0.000252319949536,0.0,0.0,0.0,0.000253126320811,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000251640051398,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000252081527068,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000251953493522,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000252126138756,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_14
y10_M_14_weights = numpy.array([0.0756031591655,0.0647004616276,0.0678658899169,0.0572666760616,0.0607142526957,0.0646843851407,0.0578114270224,0.0558115640333,0.0529832320816,0.0489397556638,0.0452624892216,0.0529597672091,0.0515332089413,0.0475177863931,0.0409388140329,0.0481033284402,0.040077942151,0.0358089950032,0.033200315053,0.0354973331077,0.0423657822113,0.0291935506129,0.0303544569312,0.0323247163892,0.0326465760563,0.0249100566438,0.026048127952,0.0280757948654,0.0266158838857,0.0286253347797,0.0269161282762,0.0280572789115,0.0188861030007,0.0269041109022,0.0223429376118,0.0263552808328,0.018609523439,0.0180358487987,0.013174091192,0.0174654634395,0.0197416260515,0.0166077808607,0.0163286218626,0.0143109927558,0.0157416001388,0.0148894663475,0.0151925801109,0.0163276120833,0.0131663928742,0.0137410373027,0.0120174739365,0.0100145916072,0.0114510277166,0.00773627646558,0.012009435693,0.01346894676,0.00858630371726,0.00830400740477,0.0103145360632,0.00744518607477,0.0103133663188,0.0108785428162,0.0114417397462,0.0106006235472,0.00658930810159,0.00630446934468,0.00859540372872,0.00687110252375,0.01088786078,0.0100193305716,0.00687164240578,0.00973356601701,0.00602031143898,0.00859608657951,0.0103095171599,0.00400180152463,0.00573012685024,0.0040024363859,0.0054450301497,0.00515234010845,0.00515992745046,0.00457501326616,0.0042930778748,0.00400551171388,0.00400652849169,0.00400908993196,0.00372002509853,0.00372151977192,0.00400145260088,0.00343771778845,0.00372587681981,0.00343557225729,0.00286020798613,0.00286033695795,0.00257254184706,0.00314875193339,0.000859649549015,0.00400944285484,0.00372160275378,0.00487358902125,0.00170923849648,0.00401088753915,0.00200684146415,0.00457591306953,0.00257681391353,0.00142854983268,0.0020078182507,0.00342979052072,0.00142892275119,0.0022914402735,0.0022926090181,0.00228739615722,0.00143169614515,0.00143012148924,0.00344085510288,0.00143017747701,0.00200241443155,0.000860102749981,0.00257935035926,0.00114679140266,0.000858922307932,0.0020013236699,0.00199981000067,0.00257525925325,0.00114748225169,0.000573500778366,0.00142845385366,0.00200760829658,0.00171790260319,0.00143136221812,0.00114735927856,0.000857970315962,0.000571944218506,0.00257726181565,0.00143393265643,0.00172190472865,0.00114563765477,0.000861921752492,0.00143477347269,0.00171504922671,0.000859650448818,0.0,0.000858529693726,0.000571638585293,0.00057361965239,0.000287793111327,0.00114529173036,0.000859820111744,0.000286204058567,0.000860079355094,0.00114143857236,0.000573500778366,0.00143701198353,0.000857803252468,0.000859944284609,0.000287149052067,0.000571716068362,0.00028409371972,0.000572941100667,0.000286785031613,0.0,0.000570181903608,0.000286459702704,0.000573484581905,0.000858529693726,0.000860972060019,0.00114520374959,0.000287793111327,0.000573608854749,0.000573353210613,0.000572226456831,0.0,0.000573314019177,0.0,0.000286335529838,0.000287793111327,0.000284560617693,0.0,0.00114737527507,0.000858729250119,0.000286335529838,0.000860916372188,0.000284929737033,0.000286470600323,0.000287793111327,0.000570313274901,0.0,0.0,0.0,0.000287149052067,0.00028409371972,0.000284929737033,0.000286335529838,0.0,0.0,0.000284929737033,0.0,0.000570437547744,0.0,0.000286335529838,0.0,0.000286470600323,0.000287296719799,0.000286204058567,0.000862882942452,0.000286459702704,0.0,0.0,0.000284489133314,0.000283587430354,0.000283587430354,0.000284929737033,0.0,0.0,0.0,0.000856346470805,0.0,0.000287149052067,0.0,0.000287272924998,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00028633552983
8,0.000287149052067,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000286335529838,0.0,0.0,0.000571648683087,0.0,0.000283977745062,0.0,0.0,0.0,0.000283977745062,0.0,0.0,0.000286459702704,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000286843418854,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000283587430354,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000287296719799,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_15
y10_M_15_weights = numpy.array([0.0119656553677,0.0120252727729,0.0117464100028,0.0111006644456,0.0103413377779,0.0103859471038,0.0096543524832,0.0100602118516,0.00885381960618,0.00840221514276,0.00818708117853,0.00876383399592,0.00831463518385,0.0075126689745,0.00718767134357,0.00762353848845,0.00725718377112,0.00736564344268,0.00658465333967,0.00593711821257,0.0059815012228,0.00658466591276,0.00569892100892,0.00604535575952,0.00519917418114,0.00475094768809,0.00520202827274,0.00455609831021,0.0045579255994,0.00486014498098,0.0041036212743,0.00401388209661,0.00395053635096,0.00380011608464,0.00386668850448,0.0035405894708,0.00334660847439,0.00330506321073,0.00330277323181,0.00313202563063,0.0031955130331,0.00319494011927,0.00353799270847,0.00317045737792,0.00257018701344,0.00254885760325,0.00241893818045,0.00263485251452,0.00231152626645,0.00239781287343,0.00241915695223,0.00215998741417,0.00211519326387,0.00224703846506,0.00207188273827,0.00194383544868,0.00202889617932,0.00177140053873,0.00192323821147,0.00220296014272,0.00159715510371,0.00209528880395,0.00192245909895,0.00205224901891,0.00155591075603,0.00170632641221,0.00159694974323,0.00125268175526,0.00125174086898,0.00125295626774,0.00122907452013,0.00144696492496,0.00118611562198,0.00114506531899,0.00140365859039,0.00136105676801,0.00110161565119,0.000993629985151,0.00079733344453,0.00129631289467,0.00116669690247,0.000971533197318,0.000863889938449,0.00101499585731,0.000842535801174,0.000799063501812,0.000950404956573,0.000907236506904,0.000885698802505,0.000798572732171,0.000820812015033,0.000669553123568,0.000669492353629,0.00088564138539,0.000712765998158,0.000583418650989,0.000820686703228,0.000561880527487,0.000540309713949,0.00045373308768,0.000604524260171,0.000496731800625,0.000431959847382,0.000647841910517,0.000496721742153,0.000496710007268,0.000431883989734,0.000475170207354,0.000561562428292,0.000453656391827,0.000475197868153,0.000561702408702,0.000431897820134,0.000302415396061,0.000410458646856,0.00045359855561,0.000453610709597,0.000237646837552,0.000324159843982,0.000345571398371,0.00028066533216,0.00028094110195,0.000496870104623,0.000259113378258,0.000279103880024,0.00023749411641,0.000237555724555,0.000410326922776,0.000281040303635,0.000151144308698,0.000323847444588,0.000453567541986,0.000237418510225,0.000345526260975,0.000324084866451,0.000194445991219,0.000237731831646,0.000194595066165,0.000216011440238,0.000129673954155,0.000280987748116,0.000172770611477,0.000216077239413,0.000151215346661,0.000151026582659,0.000194491673449,0.000194215233094,0.000107998700144,0.000107905994555,0.000151227542559,0.000151236385633,0.000108023972056,0.000237582128045,0.000237575212845,0.000107937930205,0.000237650860941,4.31658093205e-05,0.000149623132363,0.000172794877542,0.000172833015917,0.000129493320752,0.000107991784944,0.000129542816819,6.48684726698e-05,4.32743150934e-05,0.0,8.64047521186e-05,8.63689607203e-05,0.000108070534402,6.47311745191e-05,0.000108047609466,0.000151307549326,8.63691283616e-05,0.000129594911325,0.000129593151092,4.32407449411e-05,0.000108021164066,2.16183859222e-05,0.000129692059406,6.47869571316e-05,2.16407199224e-05,4.32079291744e-05,2.16755767209e-05,0.000129635564318,6.48769385509e-05,0.000129577476639,8.64796877393e-05,8.64487579361e-05,4.31204623733e-05,4.32200831621e-05,0.0,2.15263383251e-05,2.15981767744e-05,4.32697049601e-05,2.16090063966e-05,0.0,2.15598456119e-05,8.64055484143e-05,6.47263548344e-05,4.3203444772e-05,2.15937845747e-05,6.4792698843e-05,6.47515848364e-05,2.16224721767e
-05,6.31554728801e-05,0.0,0.0,6.47965126806e-05,4.31961942897e-05,6.47935789594e-05,4.32351708709e-05,6.48531334991e-05,2.15263383251e-05,2.1583143549e-05,4.31360949161e-05,2.1609764973e-05,0.0,6.48165458051e-05,0.0,4.31775861154e-05,4.32814398448e-05,2.16145930399e-05,0.0,2.1609764973e-05,0.0,6.48032183289e-05,4.31963619309e-05,0.0,2.15941366213e-05,4.31748200355e-05,8.62338838157e-05,2.15793716217e-05,4.32901571877e-05,4.31345442349e-05,2.16145930399e-05,0.0,0.0,2.16183859222e-05,0.0,4.31345442349e-05,2.15887092371e-05,2.16295927371e-05,0.0,4.31974935091e-05,0.0,2.15598456119e-05,2.16295927371e-05,2.1609764973e-05,0.0,2.15263383251e-05,0.0,0.0,0.0,2.15560485385e-05,2.15560485385e-05,0.0,0.0,4.32323628807e-05,0.0,0.0,0.0,2.16224721767e-05,2.15941366213e-05,4.31921709007e-05,2.15777916033e-05,0.0,0.0,0.0,0.0,4.32591016536e-05,2.15263383251e-05,0.0,0.0,2.15916555314e-05,2.16145930399e-05,2.16557196196e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.15793716217e-05,2.16755767209e-05,0.0,0.0,2.15567316764e-05,0.0,0.0,0.0,2.1583143549e-05,0.0,0.0,2.1583143549e-05,0.0,0.0,0.0,2.16557196196e-05,0.0,0.0,0.0,0.0,2.1583143549e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.16183859222e-05,0.0,0.0,0.0,0.0,0.0,2.15981767744e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.15793716217e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y10_M_16
y10_M_16_weights = numpy.array([0.00325835075344,0.00215015250485,0.00246392779931,0.00255084667768,0.00229410145463,0.00252312174609,0.00266642316693,0.0022704800149,0.00212106419811,0.00221438410794,0.00187111951405,0.00232472392969,0.00221276231528,0.00164561459183,0.00164582102881,0.00178657391834,0.00175954107043,0.0013297279966,0.00221113012652,0.0015320653434,0.00184112229055,0.00189475432021,0.00130390718623,0.00155889026918,0.00119146096472,0.00104996802091,0.00130670255082,0.00113587342738,0.00121682449492,0.000739692055984,0.00102141437109,0.000848547016663,0.000965427029189,0.00113626506358,0.00110565016282,0.000849722964855,0.00102069793082,0.00122164917955,0.000880096081871,0.000850938269736,0.000875311347989,0.000791489321311,0.000734403556471,0.00096400068334,0.000765782868107,0.000537127628511,0.000907767844802,0.000850495989651,0.000568014758749,0.000624549529931,0.000567879609361,0.000567136584759,0.000707330906322,0.00051027448473,0.000396778850505,0.000652295996315,0.000455972797282,0.000539536703481,0.000567751737248,0.000312216323994,0.000453698574669,0.000510824438778,0.000567689954671,0.000422050300899,0.00042340565619,0.000369151345286,0.000368950848941,0.00022705260548,0.000312063055677,0.000424948141403,0.000340409822969,0.000539331603146,0.000482519552945,0.000426297407546,0.000255011152442,0.00019872455118,0.000425682700604,0.000284217232216,0.000482424205795,0.000255769771204,0.000311906074465,0.000198863116432,0.000369329415743,0.000368693322525,0.000255701305415,0.000282487765597,0.000227168744844,0.000395522852292,0.000227023199349,0.000340519576153,0.000228697418086,0.0001421935523,0.000227074288788,8.66102476614e-05,0.000169991791296,0.000141777084263,0.000283530940653,0.000369104562806,0.000142111898307,8.51838721098e-05,0.00014082561772,0.00017000085076,0.000141963872598,0.000170548577071,0.000113440386343,8.50473860795e-05,0.000228607417505,0.000200593177114,8.52720607983e-05,0.000112211729891,0.000227024684507,8.50953121317e-05,8.37765808398e-05,8.52524121565e-05,5.67590152046e-05,2.83557376467e-05,0.000113700482083,0.000113360648204,2.84139706962e-05,0.000141891396883,8.52048573939e-05,8.4868469082e-05,8.5264308273e-05,5.67114604419e-05,0.000168523415473,2.84139706962e-05,0.000142031076003,5.51118857515e-05,0.000113617491448,0.000138936021354,0.0,2.83557376467e-05,5.67883322257e-05,0.0,2.83743615294e-05,8.51123023405e-05,8.53235660816e-05,2.84351490509e-05,2.84032775578e-05,0.000113606798309,2.84139706962e-05,5.46048676244e-05,5.65965983137e-05,8.51729859008e-05,8.5133376734e-05,0.000139830903373,0.000139563946203,0.0,0.0,5.67300843246e-05,0.000141657692403,0.0,2.84547976927e-05,2.84032775578e-05,0.000113407237614,5.67487082073e-05,0.0,2.83557376467e-05,0.0,2.83557376467e-05,2.84351490509e-05,2.84351490509e-05,2.84032775578e-05,5.63818147479e-05,0.0,5.68384414603e-05,2.84547976927e-05,5.68899467435e-05,5.64333200311e-05,2.84139706962e-05,2.83557376467e-05,5.66255291937e-05,0.0,0.0,2.84032775578e-05,2.84139706962e-05,0.0,2.84032775578e-05,5.67908866976e-05,5.68580752505e-05,0.0,0.0,0.0,2.84547976927e-05,2.83743615294e-05,0.0,5.68899467435e-05,8.49750737311e-05,0.0,5.65822962411e-05,2.83557376467e-05,0.0,0.0,0.0,2.84139706962e-05,0.0,2.83743615294e-05,2.84032775578e-05,2.84547976927e-05,0.0,2.61908969282e-05,0.0,0.0,0.0,2.83557376467e-05,0.0,0.0,0.0,0.0,2.82590538538e-05,0.0,2.84351490509e-05,0.0,2.82631825934e-05,0.0,0.0,0.0,5.6769708343e-05,0.0,0.0,0.0,2.83557376467e-05,2.83557376467e-05,2.84547976927e-05,0.0,2.83557376467e-05,0.0,0.0,2.84547976927e-
05,2.84547976927e-05,0.0,2.84032775578e-05,0.0,0.0,0.0,0.0,0.0,0.0,5.6769708343e-05,0.0,0.0,0.0,2.84547976927e-05,2.83557376467e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.84351490509e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.82053653881e-05,0.0,0.0,0.0,0.0,0.0,0.0,5.65822962411e-05,2.83743615294e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.83743615294e-05,2.83743615294e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.84032775578e-05,0.0,0.0,0.0,0.0,2.83557376467e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
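# right=0.7 keeps the right 30% of the canvas free for the legend, which is
# placed outside the axes via bbox_to_anchor further below.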
pad = fig.add_subplot(frame[0])
# Creating a new Stack
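# The stack is emulated with "step" histograms: each call below passes a
# partial cumulative sum of the per-process weight arrays, from the full
# signal+background sum down to the signal alone, so the outlines nest
# without needing bottom= offsets.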
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights+y10_M_11_weights+y10_M_12_weights+y10_M_13_weights+y10_M_14_weights+y10_M_15_weights+y10_M_16_weights,\
label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights+y10_M_11_weights+y10_M_12_weights+y10_M_13_weights+y10_M_14_weights+y10_M_15_weights,\
label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights+y10_M_11_weights+y10_M_12_weights+y10_M_13_weights+y10_M_14_weights,\
label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights+y10_M_11_weights+y10_M_12_weights+y10_M_13_weights,\
label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights+y10_M_11_weights+y10_M_12_weights,\
label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights+y10_M_11_weights,\
label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights,\
label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights,\
label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights,\
label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights,\
label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights,\
label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights,\
label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights,\
label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights,\
label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights+y10_M_2_weights,\
label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights+y10_M_1_weights,\
label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y10_M_0_weights,\
label="$signal$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, density=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"M [ a_{1} , a_{2} ] ( GeV ) ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights+y10_M_11_weights+y10_M_12_weights+y10_M_13_weights+y10_M_14_weights+y10_M_15_weights+y10_M_16_weights).max()*1.1
ymin=0 # linear scale
#ymin=min([x for x in (y10_M_0_weights+y10_M_1_weights+y10_M_2_weights+y10_M_3_weights+y10_M_4_weights+y10_M_5_weights+y10_M_6_weights+y10_M_7_weights+y10_M_8_weights+y10_M_9_weights+y10_M_10_weights+y10_M_11_weights+y10_M_12_weights+y10_M_13_weights+y10_M_14_weights+y10_M_15_weights+y10_M_16_weights) if x])/100. # log scale
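# The commented-out line above sets the log-scale floor to 1/100 of the
# smallest non-zero stacked bin content, keeping every populated bin visible
# when the y-axis is switched to "log".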
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
plt.gca().set_yscale("linear")
#plt.gca().set_yscale("log",nonposy="clip")
# Legend
plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_9.png')
plt.savefig('../../PDF/MadAnalysis5job_0/selection_9.pdf')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_9.eps')
# Running!
if __name__ == '__main__':
selection_9()
| 386.737113
| 6,216
| 0.764152
| 16,359
| 75,027
| 3.45993
| 0.178434
| 0.288935
| 0.41183
| 0.522252
| 0.318899
| 0.292009
| 0.285225
| 0.264819
| 0.263087
| 0.256232
| 0
| 0.684462
| 0.022085
| 75,027
| 193
| 6,217
| 388.740933
| 0.086984
| 0.017101
| 0
| 0.185841
| 0
| 0.00885
| 0.01415
| 0.002713
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00885
| false
| 0
| 0.035398
| 0
| 0.044248
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 7cfd046ff7971d5fc250d3dc53e8ccd27fd4a202
| 126,002
| py
| Python
| dlcv/object_detection/tensorflow_detect/core/preprocessor_test.py
| Loonride/deeplens-cv
| 9e5b31c1a269d364e4912ba8266415fa04277e11
| ["MIT"] | 11
| 2019-10-07T22:06:30.000Z
| 2020-08-26T22:10:53.000Z
| dlcv/object_detection/tensorflow_detect/core/preprocessor_test.py
| Loonride/deeplens-cv
| 9e5b31c1a269d364e4912ba8266415fa04277e11
| ["MIT"] | 16
| 2019-11-02T00:32:00.000Z
| 2022-02-10T00:23:32.000Z
| dlcv/object_detection/tensorflow_detect/core/preprocessor_test.py
| Loonride/deeplens-cv
| 9e5b31c1a269d364e4912ba8266415fa04277e11
| ["MIT"] | 9
| 2019-10-07T13:33:13.000Z
| 2020-09-27T09:50:58.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.preprocessor."""
import numpy as np
import six
import tensorflow as tf
from object_detection.tensorflow_detect.core import standard_fields as fields, \
preprocessor, preprocessor_cache
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top
class PreprocessorTest(tf.test.TestCase):
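  # The create*/expected* helpers below build small deterministic tensors so
  # that each preprocessing op can be checked against hand-computed results.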
def createColorfulTestImage(self):
ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8))
ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8))
ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8))
imr = tf.concat([ch255, ch0, ch0], 3)
img = tf.concat([ch255, ch255, ch0], 3)
imb = tf.concat([ch255, ch0, ch255], 3)
imw = tf.concat([ch128, ch128, ch128], 3)
imu = tf.concat([imr, img], 2)
imd = tf.concat([imb, imw], 2)
im = tf.concat([imu, imd], 1)
return im
def createTestImages(self):
images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128],
[0, 128, 128, 128], [192, 192, 128, 128]]],
dtype=tf.uint8)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128],
[0, 128, 192, 192], [192, 192, 128, 192]]],
dtype=tf.uint8)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192],
[0, 128, 128, 0], [192, 192, 192, 128]]],
dtype=tf.uint8)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
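  # The channel constants above (0, 128, 192) are chosen so that normalizing
  # from [0, 256] to [-1, 1] yields the round values -1, 0 and 0.5 used by
  # expectedImagesAfterNormalization below.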
def createEmptyTestBoxes(self):
boxes = tf.constant([[]], dtype=tf.float32)
return boxes
def createTestBoxes(self):
boxes = tf.constant(
[[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
return boxes
def createTestLabelScores(self):
return tf.constant([1.0, 0.5], dtype=tf.float32)
def createTestLabelScoresWithMissingScore(self):
return tf.constant([0.5, np.nan], dtype=tf.float32)
def createTestMasks(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]],
[[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def createTestKeypoints(self):
keypoints = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
])
return tf.constant(keypoints, dtype=tf.float32)
def createTestKeypointsInsideCrop(self):
keypoints = np.array([
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
])
return tf.constant(keypoints, dtype=tf.float32)
def createTestKeypointsOutsideCrop(self):
keypoints = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
])
return tf.constant(keypoints, dtype=tf.float32)
def createKeypointFlipPermutation(self):
return np.array([0, 2, 1], dtype=np.int32)
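  # In the permutation above, keypoint 0 is its own mirror image while
  # keypoints 1 and 2 swap places under a horizontal flip.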
def createTestLabels(self):
labels = tf.constant([1, 2], dtype=tf.int32)
return labels
def createTestBoxesOutOfImage(self):
boxes = tf.constant(
[[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32)
return boxes
def createTestMultiClassScores(self):
return tf.constant([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32)
def expectedImagesAfterNormalization(self):
images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0],
[-1, 0, 0, 0], [0.5, 0.5, 0, 0]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0],
[-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5],
[-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedMaxImageAfterColorScale(self):
images_r = tf.constant([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1],
[-0.9, 0.1, 0.1, 0.1], [0.6, 0.6, 0.1, 0.1]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1],
[-0.9, 0.1, 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6],
[-0.9, 0.1, 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedMinImageAfterColorScale(self):
images_r = tf.constant([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1],
[-1, -0.1, -0.1, -0.1], [0.4, 0.4, -0.1, -0.1]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1],
[-1, -0.1, 0.4, 0.4], [0.4, 0.4, -0.1, 0.4]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4],
[-1, -0.1, -0.1, -1], [0.4, 0.4, 0.4, -0.1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterLeftRightFlip(self):
images_r = tf.constant([[[0, 0, 0, 0], [0, 0, -1, -1],
[0, 0, 0, -1], [0, 0, 0.5, 0.5]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, -1, -1], [0, 0, -1, -1],
[0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1],
[-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterUpDownFlip(self):
images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0],
[-1, -1, 0, 0], [0, 0, 0, 0]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5],
[-1, -1, 0, 0], [-1, -1, 0, 0]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1],
[-1, -1, 0, 0.5], [0, 0, 0.5, -1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterRot90(self):
images_r = tf.constant([[[0, 0, 0, 0], [0, 0, 0, 0],
[0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0],
[-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5],
[0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedBoxesAfterLeftRightFlip(self):
boxes = tf.constant([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]],
dtype=tf.float32)
return boxes
def expectedBoxesAfterUpDownFlip(self):
boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]],
dtype=tf.float32)
return boxes
def expectedBoxesAfterRot90(self):
boxes = tf.constant(
[[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32)
return boxes
def expectedMasksAfterLeftRightFlip(self):
mask = np.array([
[[0.0, 0.0, 255.0],
[0.0, 0.0, 255.0],
[0.0, 0.0, 255.0]],
[[0.0, 255.0, 255.0],
[0.0, 255.0, 255.0],
[0.0, 255.0, 255.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedMasksAfterUpDownFlip(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]],
[[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedMasksAfterRot90(self):
mask = np.array([
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[255.0, 255.0, 255.0]],
[[0.0, 0.0, 0.0],
[255.0, 255.0, 255.0],
[255.0, 255.0, 255.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedLabelScoresAfterThresholding(self):
return tf.constant([1.0], dtype=tf.float32)
def expectedBoxesAfterThresholding(self):
return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32)
def expectedLabelsAfterThresholding(self):
return tf.constant([1], dtype=tf.float32)
def expectedMultiClassScoresAfterThresholding(self):
return tf.constant([[1.0, 0.0]], dtype=tf.float32)
def expectedMasksAfterThresholding(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedKeypointsAfterThresholding(self):
keypoints = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]
])
return tf.constant(keypoints, dtype=tf.float32)
def expectedLabelScoresAfterThresholdingWithMissingScore(self):
return tf.constant([np.nan], dtype=tf.float32)
def expectedBoxesAfterThresholdingWithMissingScore(self):
return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32)
def expectedLabelsAfterThresholdingWithMissingScore(self):
return tf.constant([2], dtype=tf.float32)
def testRgbToGrayscale(self):
images = self.createTestImages()
grayscale_images = preprocessor._rgb_to_grayscale(images)
expected_images = tf.image.rgb_to_grayscale(images)
with self.test_session() as sess:
(grayscale_images, expected_images) = sess.run(
[grayscale_images, expected_images])
self.assertAllEqual(expected_images, grayscale_images)
def testNormalizeImage(self):
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 256,
'target_minval': -1,
'target_maxval': 1
})]
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
images_expected = self.expectedImagesAfterNormalization()
with self.test_session() as sess:
(images_, images_expected_) = sess.run(
[images, images_expected])
images_shape_ = images_.shape
images_expected_shape_ = images_expected_.shape
expected_shape = [1, 4, 4, 3]
self.assertAllEqual(images_expected_shape_, images_shape_)
self.assertAllEqual(images_shape_, expected_shape)
self.assertAllClose(images_, images_expected_)
def testRetainBoxesAboveThreshold(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
(retained_boxes, retained_labels,
retained_label_scores) = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, threshold=0.6)
with self.test_session() as sess:
(retained_boxes_, retained_labels_, retained_label_scores_,
expected_retained_boxes_, expected_retained_labels_,
expected_retained_label_scores_) = sess.run([
retained_boxes, retained_labels, retained_label_scores,
self.expectedBoxesAfterThresholding(),
self.expectedLabelsAfterThresholding(),
self.expectedLabelScoresAfterThresholding()])
self.assertAllClose(
retained_boxes_, expected_retained_boxes_)
self.assertAllClose(
retained_labels_, expected_retained_labels_)
self.assertAllClose(
retained_label_scores_, expected_retained_label_scores_)
def testRetainBoxesAboveThresholdWithMultiClassScores(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
multiclass_scores = self.createTestMultiClassScores()
(_, _, _,
retained_multiclass_scores) = preprocessor.retain_boxes_above_threshold(
boxes,
labels,
label_scores,
multiclass_scores=multiclass_scores,
threshold=0.6)
with self.test_session() as sess:
(retained_multiclass_scores_,
expected_retained_multiclass_scores_) = sess.run([
retained_multiclass_scores,
self.expectedMultiClassScoresAfterThresholding()
])
self.assertAllClose(retained_multiclass_scores_,
expected_retained_multiclass_scores_)
def testRetainBoxesAboveThresholdWithMasks(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
masks = self.createTestMasks()
_, _, _, retained_masks = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, masks, threshold=0.6)
with self.test_session() as sess:
retained_masks_, expected_retained_masks_ = sess.run([
retained_masks, self.expectedMasksAfterThresholding()])
self.assertAllClose(
retained_masks_, expected_retained_masks_)
def testRetainBoxesAboveThresholdWithKeypoints(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
keypoints = self.createTestKeypoints()
(_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, keypoints=keypoints, threshold=0.6)
with self.test_session() as sess:
(retained_keypoints_,
expected_retained_keypoints_) = sess.run([
retained_keypoints,
self.expectedKeypointsAfterThresholding()])
self.assertAllClose(
retained_keypoints_, expected_retained_keypoints_)
def testRetainBoxesAboveThresholdWithMissingScore(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScoresWithMissingScore()
(retained_boxes, retained_labels,
retained_label_scores) = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, threshold=0.6)
with self.test_session() as sess:
(retained_boxes_, retained_labels_, retained_label_scores_,
expected_retained_boxes_, expected_retained_labels_,
expected_retained_label_scores_) = sess.run([
retained_boxes, retained_labels, retained_label_scores,
self.expectedBoxesAfterThresholdingWithMissingScore(),
self.expectedLabelsAfterThresholdingWithMissingScore(),
self.expectedLabelScoresAfterThresholdingWithMissingScore()])
self.assertAllClose(
retained_boxes_, expected_retained_boxes_)
self.assertAllClose(
retained_labels_, expected_retained_labels_)
self.assertAllClose(
retained_label_scores_, expected_retained_label_scores_)
def testFlipBoxesLeftRight(self):
boxes = self.createTestBoxes()
flipped_boxes = preprocessor._flip_boxes_left_right(boxes)
expected_boxes = self.expectedBoxesAfterLeftRightFlip()
with self.test_session() as sess:
flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes])
self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
def testFlipBoxesUpDown(self):
boxes = self.createTestBoxes()
flipped_boxes = preprocessor._flip_boxes_up_down(boxes)
expected_boxes = self.expectedBoxesAfterUpDownFlip()
with self.test_session() as sess:
flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes])
self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
def testRot90Boxes(self):
boxes = self.createTestBoxes()
rotated_boxes = preprocessor._rot90_boxes(boxes)
expected_boxes = self.expectedBoxesAfterRot90()
with self.test_session() as sess:
rotated_boxes, expected_boxes = sess.run([rotated_boxes, expected_boxes])
self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten())
def testFlipMasksLeftRight(self):
test_mask = self.createTestMasks()
flipped_mask = preprocessor._flip_masks_left_right(test_mask)
expected_mask = self.expectedMasksAfterLeftRightFlip()
with self.test_session() as sess:
flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask])
self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
def testFlipMasksUpDown(self):
test_mask = self.createTestMasks()
flipped_mask = preprocessor._flip_masks_up_down(test_mask)
expected_mask = self.expectedMasksAfterUpDownFlip()
with self.test_session() as sess:
flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask])
self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
def testRot90Masks(self):
test_mask = self.createTestMasks()
rotated_mask = preprocessor._rot90_masks(test_mask)
expected_mask = self.expectedMasksAfterRot90()
with self.test_session() as sess:
rotated_mask, expected_mask = sess.run([rotated_mask, expected_mask])
self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten())
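  # _testPreprocessorCache feeds identical inputs through the pipeline
  # num_runs times with a shared PreprocessorCache and asserts that
  # consecutive runs agree, i.e. the cached random draws are replayed.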
def _testPreprocessorCache(self,
preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False,
num_runs=4):
cache = preprocessor_cache.PreprocessorCache()
images = self.createTestImages()
boxes = self.createTestBoxes()
classes = self.createTestLabels()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=test_masks, include_keypoints=test_keypoints)
out = []
for i in range(num_runs):
tensor_dict = {
fields.InputDataFields.image: images,
}
num_outputs = 1
if test_boxes:
tensor_dict[fields.InputDataFields.groundtruth_boxes] = boxes
tensor_dict[fields.InputDataFields.groundtruth_classes] = classes
num_outputs += 1
if test_masks:
tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks
num_outputs += 1
if test_keypoints:
tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints
num_outputs += 1
out.append(preprocessor.preprocess(
tensor_dict, preprocess_options, preprocessor_arg_map, cache))
with self.test_session() as sess:
to_run = []
for i in range(num_runs):
to_run.append(out[i][fields.InputDataFields.image])
if test_boxes:
to_run.append(out[i][fields.InputDataFields.groundtruth_boxes])
if test_masks:
to_run.append(
out[i][fields.InputDataFields.groundtruth_instance_masks])
if test_keypoints:
to_run.append(out[i][fields.InputDataFields.groundtruth_keypoints])
out_array = sess.run(to_run)
for i in range(num_outputs, len(out_array)):
self.assertAllClose(out_array[i], out_array[i - num_outputs])
def testRandomHorizontalFlip(self):
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
images_expected1 = self.expectedImagesAfterLeftRightFlip()
boxes_expected1 = self.expectedBoxesAfterLeftRightFlip()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
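    # The flip is applied at random, so the output must equal either the
    # flipped or the original expectation; the product of the two squared
    # differences is zero wherever at least one of them matches.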
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_diff_,
boxes_diff_expected_) = sess.run([images_diff, images_diff_expected,
boxes_diff, boxes_diff_expected])
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomHorizontalFlipWithEmptyBoxes(self):
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
images_expected1 = self.expectedImagesAfterLeftRightFlip()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_,
boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
boxes_expected])
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomHorizontalFlipWithCache(self):
keypoint_flip_permutation = self.createKeypointFlipPermutation()
preprocess_options = [
(preprocessor.random_horizontal_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRunRandomHorizontalFlipWithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
keypoint_flip_permutation = self.createKeypointFlipPermutation()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocess_options = [
(preprocessor.random_horizontal_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
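    # The input image is random, so there is no fixed expected tensor; the
    # assertions below only verify that every output is materialized.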
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
      self.assertIsNotNone(boxes)
      self.assertIsNotNone(masks)
      self.assertIsNotNone(keypoints)
def testRandomVerticalFlip(self):
preprocess_options = [(preprocessor.random_vertical_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
images_expected1 = self.expectedImagesAfterUpDownFlip()
boxes_expected1 = self.expectedBoxesAfterUpDownFlip()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_diff_,
boxes_diff_expected_) = sess.run([images_diff, images_diff_expected,
boxes_diff, boxes_diff_expected])
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomVerticalFlipWithEmptyBoxes(self):
preprocess_options = [(preprocessor.random_vertical_flip, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
images_expected1 = self.expectedImagesAfterUpDownFlip()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_,
boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
boxes_expected])
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomVerticalFlipWithCache(self):
keypoint_flip_permutation = self.createKeypointFlipPermutation()
preprocess_options = [
(preprocessor.random_vertical_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRunRandomVerticalFlipWithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_vertical_flip, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
keypoint_flip_permutation = self.createKeypointFlipPermutation()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocess_options = [
(preprocessor.random_vertical_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
      self.assertIsNotNone(boxes)
      self.assertIsNotNone(masks)
      self.assertIsNotNone(keypoints)
def testRandomRotation90(self):
preprocess_options = [(preprocessor.random_rotation90, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
images_expected1 = self.expectedImagesAfterRot90()
boxes_expected1 = self.expectedBoxesAfterRot90()
images_expected2 = images
boxes_expected2 = boxes
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
boxes_diff_expected = tf.zeros_like(boxes_diff)
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_diff_,
boxes_diff_expected_) = sess.run([images_diff, images_diff_expected,
boxes_diff, boxes_diff_expected])
self.assertAllClose(boxes_diff_, boxes_diff_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomRotation90WithEmptyBoxes(self):
preprocess_options = [(preprocessor.random_rotation90, {})]
images = self.expectedImagesAfterNormalization()
boxes = self.createEmptyTestBoxes()
tensor_dict = {fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes}
images_expected1 = self.expectedImagesAfterRot90()
boxes_expected = self.createEmptyTestBoxes()
images_expected2 = images
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
images_diff1 = tf.squared_difference(images, images_expected1)
images_diff2 = tf.squared_difference(images, images_expected2)
images_diff = tf.multiply(images_diff1, images_diff2)
images_diff_expected = tf.zeros_like(images_diff)
with self.test_session() as sess:
(images_diff_, images_diff_expected_, boxes_,
boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
boxes_expected])
self.assertAllClose(boxes_, boxes_expected_)
self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomRotation90WithCache(self):
preprocess_options = [(preprocessor.random_rotation90, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRunRandomRotation90WithMaskAndKeypoints(self):
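    """Tests running random_rotation90 with masks and keypoints."""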
preprocess_options = [(preprocessor.random_rotation90, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
      self.assertIsNotNone(boxes)
      self.assertIsNotNone(masks)
      self.assertIsNotNone(keypoints)
def testRandomPixelValueScale(self):
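    """Tests that scaled pixel values stay within 0.9x-1.1x of the input."""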
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_min = tf.to_float(images) * 0.9 / 255.0
images_max = tf.to_float(images) * 1.1 / 255.0
images = tensor_dict[fields.InputDataFields.image]
values_greater = tf.greater_equal(images, images_min)
values_less = tf.less_equal(images, images_max)
values_true = tf.fill([1, 4, 4, 3], True)
with self.test_session() as sess:
(values_greater_, values_less_, values_true_) = sess.run(
[values_greater, values_less, values_true])
self.assertAllClose(values_greater_, values_true_)
self.assertAllClose(values_less_, values_true_)
def testRandomPixelValueScaleWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_pixel_value_scale, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testRandomImageScale(self):
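    """Tests that random_image_scale keeps each dimension within 0.5x-2x."""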
preprocess_options = [(preprocessor.random_image_scale, {})]
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images_scaled = tensor_dict[fields.InputDataFields.image]
images_original_shape = tf.shape(images_original)
images_scaled_shape = tf.shape(images_scaled)
with self.test_session() as sess:
(images_original_shape_, images_scaled_shape_) = sess.run(
[images_original_shape, images_scaled_shape])
      self.assertLessEqual(images_original_shape_[1] * 0.5,
                           images_scaled_shape_[1])
      self.assertGreaterEqual(images_original_shape_[1] * 2.0,
                              images_scaled_shape_[1])
      self.assertLessEqual(images_original_shape_[2] * 0.5,
                           images_scaled_shape_[2])
      self.assertGreaterEqual(images_original_shape_[2] * 2.0,
                              images_scaled_shape_[2])
def testRandomImageScaleWithCache(self):
preprocess_options = [(preprocessor.random_image_scale, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomRGBtoGray(self):
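    """Tests that images are either left in color or converted to gray."""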
preprocess_options = [(preprocessor.random_rgb_to_gray, {})]
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images_gray = tensor_dict[fields.InputDataFields.image]
images_gray_r, images_gray_g, images_gray_b = tf.split(
value=images_gray, num_or_size_splits=3, axis=3)
images_r, images_g, images_b = tf.split(
value=images_original, num_or_size_splits=3, axis=3)
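    # Each per-channel product below is zero when either the channel is
    # unchanged (no conversion happened) or all output channels are equal
    # (the image was converted to gray).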
images_r_diff1 = tf.squared_difference(tf.to_float(images_r),
tf.to_float(images_gray_r))
images_r_diff2 = tf.squared_difference(tf.to_float(images_gray_r),
tf.to_float(images_gray_g))
images_r_diff = tf.multiply(images_r_diff1, images_r_diff2)
images_g_diff1 = tf.squared_difference(tf.to_float(images_g),
tf.to_float(images_gray_g))
images_g_diff2 = tf.squared_difference(tf.to_float(images_gray_g),
tf.to_float(images_gray_b))
images_g_diff = tf.multiply(images_g_diff1, images_g_diff2)
images_b_diff1 = tf.squared_difference(tf.to_float(images_b),
tf.to_float(images_gray_b))
images_b_diff2 = tf.squared_difference(tf.to_float(images_gray_b),
tf.to_float(images_gray_r))
images_b_diff = tf.multiply(images_b_diff1, images_b_diff2)
image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1])
with self.test_session() as sess:
(images_r_diff_, images_g_diff_, images_b_diff_, image_zero1_) = sess.run(
[images_r_diff, images_g_diff, images_b_diff, image_zero1])
self.assertAllClose(images_r_diff_, image_zero1_)
self.assertAllClose(images_g_diff_, image_zero1_)
self.assertAllClose(images_b_diff_, image_zero1_)
def testRandomRGBtoGrayWithCache(self):
preprocess_options = [(
preprocessor.random_rgb_to_gray, {'probability': 0.5})]
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomAdjustBrightness(self):
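    """Tests that random_adjust_brightness preserves the image shape."""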
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_brightness, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_bright = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_bright_shape = tf.shape(images_bright)
with self.test_session() as sess:
(image_original_shape_, image_bright_shape_) = sess.run(
[image_original_shape, image_bright_shape])
self.assertAllEqual(image_original_shape_, image_bright_shape_)
def testRandomAdjustBrightnessWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_brightness, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomAdjustContrast(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_contrast, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_contrast = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_contrast_shape = tf.shape(images_contrast)
with self.test_session() as sess:
(image_original_shape_, image_contrast_shape_) = sess.run(
[image_original_shape, image_contrast_shape])
self.assertAllEqual(image_original_shape_, image_contrast_shape_)
def testRandomAdjustContrastWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_contrast, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomAdjustHue(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_hue, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_hue = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_hue_shape = tf.shape(images_hue)
with self.test_session() as sess:
(image_original_shape_, image_hue_shape_) = sess.run(
[image_original_shape, image_hue_shape])
self.assertAllEqual(image_original_shape_, image_hue_shape_)
def testRandomAdjustHueWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_hue, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomDistortColor(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_distort_color, {}))
images_original = self.createTestImages()
images_original_shape = tf.shape(images_original)
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_distorted_color = tensor_dict[fields.InputDataFields.image]
images_distorted_color_shape = tf.shape(images_distorted_color)
with self.test_session() as sess:
(images_original_shape_, images_distorted_color_shape_) = sess.run(
[images_original_shape, images_distorted_color_shape])
self.assertAllEqual(images_original_shape_, images_distorted_color_shape_)
def testRandomDistortColorWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_distort_color, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomJitterBoxes(self):
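    """Tests that random_jitter_boxes preserves the boxes tensor shape."""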
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes, {}))
boxes = self.createTestBoxes()
boxes_shape = tf.shape(boxes)
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
distorted_boxes_shape = tf.shape(distorted_boxes)
with self.test_session() as sess:
(boxes_shape_, distorted_boxes_shape_) = sess.run(
[boxes_shape, distorted_boxes_shape])
self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
def testRandomCropImage(self):
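    """Tests that random_crop_image preserves image and boxes ranks."""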
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
self.assertEqual(3, distorted_images.get_shape()[3])
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run([
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithCache(self):
preprocess_options = [(preprocessor.random_rgb_to_gray,
{'probability': 0.5}),
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,
}),
(preprocessor.random_crop_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testRandomCropImageGrayscale(self):
preprocessing_options = [(preprocessor.rgb_to_gray, {}),
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,
}),
(preprocessor.random_crop_image, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
self.assertEqual(1, distorted_images.get_shape()[3])
with self.test_session() as sess:
session_results = sess.run([
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
])
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = session_results
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithBoxOutOfImage(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxesOutOfImage()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithRandomCoefOne(self):
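    """Tests that a random_coef of 1.0 leaves all inputs unchanged."""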
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_image, {
'random_coef': 1.0
})]
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_label_scores = distorted_tensor_dict[
fields.InputDataFields.groundtruth_label_scores]
boxes_shape = tf.shape(boxes)
distorted_boxes_shape = tf.shape(distorted_boxes)
images_shape = tf.shape(images)
distorted_images_shape = tf.shape(distorted_images)
with self.test_session() as sess:
(boxes_shape_, distorted_boxes_shape_, images_shape_,
distorted_images_shape_, images_, distorted_images_,
boxes_, distorted_boxes_, labels_, distorted_labels_,
label_scores_, distorted_label_scores_) = sess.run(
[boxes_shape, distorted_boxes_shape, images_shape,
distorted_images_shape, images, distorted_images,
boxes, distorted_boxes, labels, distorted_labels,
label_scores, distorted_label_scores])
self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
self.assertAllEqual(images_shape_, distorted_images_shape_)
self.assertAllClose(images_, distorted_images_)
self.assertAllClose(boxes_, distorted_boxes_)
self.assertAllEqual(labels_, distorted_labels_)
self.assertAllEqual(label_scores_, distorted_label_scores_)
def testRandomCropWithMockSampleDistortedBoundingBox(self):
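    """Tests random_crop_image with a mocked sample_distorted_bounding_box."""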
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createColorfulTestImage()
boxes = tf.constant([[0.1, 0.1, 0.8, 0.3],
[0.2, 0.4, 0.75, 0.75],
[0.3, 0.1, 0.4, 0.7]], dtype=tf.float32)
labels = tf.constant([1, 7, 11], dtype=tf.int32)
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box') as mock_sample_distorted_bounding_box:
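      # The mock returns the (begin, size, bboxes) triple that the crop
      # slices with: start at offset (6, 143) and take a 190x237 crop,
      # where -1 keeps every channel.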
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
expected_boxes = tf.constant([[0.178947, 0.07173, 0.75789469, 0.66244733],
[0.28421, 0.0, 0.38947365, 0.57805908]],
dtype=tf.float32)
expected_labels = tf.constant([7, 11], dtype=tf.int32)
with self.test_session() as sess:
(distorted_boxes_, distorted_labels_,
expected_boxes_, expected_labels_) = sess.run(
[distorted_boxes, distorted_labels,
expected_boxes, expected_labels])
self.assertAllClose(distorted_boxes_, expected_boxes_)
self.assertAllEqual(distorted_labels_, expected_labels_)
def testRandomCropImageWithMultiClassScores(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
multiclass_scores = self.createTestMultiClassScores()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.multiclass_scores: multiclass_scores
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_multiclass_scores = distorted_tensor_dict[
fields.InputDataFields.multiclass_scores]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
multiclass_scores_rank = tf.rank(multiclass_scores)
distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_, multiclass_scores_rank_,
distorted_multiclass_scores_rank_,
distorted_multiclass_scores_) = sess.run([
boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
distorted_images_rank, multiclass_scores_rank,
distorted_multiclass_scores_rank, distorted_multiclass_scores
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
self.assertAllEqual(multiclass_scores_rank_,
distorted_multiclass_scores_rank_)
self.assertAllEqual(distorted_boxes_.shape[0],
distorted_multiclass_scores_.shape[0])
def testStrictRandomCropImageWithLabelScores(self):
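    """Tests _strict_random_crop_image when label scores are provided."""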
image = self.createColorfulTestImage()[0]
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
new_image, new_boxes, new_labels, new_label_scores = (
preprocessor._strict_random_crop_image(
image, boxes, labels, label_scores))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_label_scores = sess.run(
            [new_image, new_boxes, new_labels, new_label_scores])
expected_boxes = np.array(
[[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
self.assertAllEqual(new_image.shape, [190, 237, 3])
self.assertAllEqual(new_label_scores, [1.0, 0.5])
self.assertAllClose(
new_boxes.flatten(), expected_boxes.flatten())
def testStrictRandomCropImageWithMasks(self):
image = self.createColorfulTestImage()[0]
boxes = self.createTestBoxes()
labels = self.createTestLabels()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
new_image, new_boxes, new_labels, new_masks = (
preprocessor._strict_random_crop_image(
image, boxes, labels, masks=masks))
with self.test_session() as sess:
new_image, new_boxes, new_labels, new_masks = sess.run(
[new_image, new_boxes, new_labels, new_masks])
expected_boxes = np.array(
[[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
self.assertAllEqual(new_image.shape, [190, 237, 3])
self.assertAllEqual(new_masks.shape, [2, 190, 237])
self.assertAllClose(
new_boxes.flatten(), expected_boxes.flatten())
def testStrictRandomCropImageWithKeypoints(self):
image = self.createColorfulTestImage()[0]
boxes = self.createTestBoxes()
labels = self.createTestLabels()
keypoints = self.createTestKeypoints()
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
new_image, new_boxes, new_labels, new_keypoints = (
preprocessor._strict_random_crop_image(
image, boxes, labels, keypoints=keypoints))
with self.test_session() as sess:
new_image, new_boxes, new_labels, new_keypoints = sess.run(
[new_image, new_boxes, new_labels, new_keypoints])
expected_boxes = np.array([
[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0],], dtype=np.float32)
expected_keypoints = np.array([
[[np.nan, np.nan],
[np.nan, np.nan],
[np.nan, np.nan]],
[[0.38947368, 0.07173],
[0.49473682, 0.24050637],
[0.60000002, 0.40928277]]
], dtype=np.float32)
self.assertAllEqual(new_image.shape, [190, 237, 3])
self.assertAllClose(
new_boxes.flatten(), expected_boxes.flatten())
self.assertAllClose(
new_keypoints.flatten(), expected_keypoints.flatten())
def testRunRandomCropImageWithMasks(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_instance_masks: masks,
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True)
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_masks = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_masks_) = sess.run(
[distorted_image, distorted_boxes, distorted_labels,
distorted_masks])
expected_boxes = np.array([
[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0],
], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
self.assertAllEqual(distorted_masks_.shape, [2, 190, 237])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(
distorted_boxes_.flatten(), expected_boxes.flatten())
def testRunRandomCropImageWithKeypointsInsideCrop(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
keypoints = self.createTestKeypointsInsideCrop()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = sess.run(
[distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints])
expected_boxes = np.array([
[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0],
], dtype=np.float32)
expected_keypoints = np.array([
[[0.38947368, 0.07173],
[0.49473682, 0.24050637],
[0.60000002, 0.40928277]],
[[0.38947368, 0.07173],
[0.49473682, 0.24050637],
[0.60000002, 0.40928277]]
])
self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(
distorted_boxes_.flatten(), expected_boxes.flatten())
self.assertAllClose(
distorted_keypoints_.flatten(), expected_keypoints.flatten())
def testRunRandomCropImageWithKeypointsOutsideCrop(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
keypoints = self.createTestKeypointsOutsideCrop()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_crop_image, {})]
with mock.patch.object(
tf.image,
'sample_distorted_bounding_box'
) as mock_sample_distorted_bounding_box:
mock_sample_distorted_bounding_box.return_value = (
tf.constant([6, 143, 0], dtype=tf.int32),
tf.constant([190, 237, -1], dtype=tf.int32),
tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = sess.run(
[distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints])
expected_boxes = np.array([
[0.0, 0.0, 0.75789469, 1.0],
[0.23157893, 0.24050637, 0.75789469, 1.0],
], dtype=np.float32)
expected_keypoints = np.array([
[[np.nan, np.nan],
[np.nan, np.nan],
[np.nan, np.nan]],
[[np.nan, np.nan],
[np.nan, np.nan],
[np.nan, np.nan]],
])
self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(
distorted_boxes_.flatten(), expected_boxes.flatten())
self.assertAllClose(
distorted_keypoints_.flatten(), expected_keypoints.flatten())
def testRunRetainBoxesAboveThreshold(self):
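    """Tests retain_boxes_above_threshold on boxes, labels and scores."""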
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores
}
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True)
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_boxes = retained_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
retained_labels = retained_tensor_dict[
fields.InputDataFields.groundtruth_classes]
retained_label_scores = retained_tensor_dict[
fields.InputDataFields.groundtruth_label_scores]
with self.test_session() as sess:
(retained_boxes_, retained_labels_,
retained_label_scores_, expected_retained_boxes_,
expected_retained_labels_, expected_retained_label_scores_) = sess.run(
[retained_boxes, retained_labels, retained_label_scores,
self.expectedBoxesAfterThresholding(),
self.expectedLabelsAfterThresholding(),
self.expectedLabelScoresAfterThresholding()])
self.assertAllClose(retained_boxes_, expected_retained_boxes_)
self.assertAllClose(retained_labels_, expected_retained_labels_)
self.assertAllClose(
retained_label_scores_, expected_retained_label_scores_)
def testRunRetainBoxesAboveThresholdWithMasks(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
masks = self.createTestMasks()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True,
include_instance_masks=True)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_masks = retained_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
with self.test_session() as sess:
(retained_masks_, expected_masks_) = sess.run(
[retained_masks,
self.expectedMasksAfterThresholding()])
self.assertAllClose(retained_masks_, expected_masks_)
def testRunRetainBoxesAboveThresholdWithKeypoints(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True,
include_keypoints=True)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_keypoints = retained_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
(retained_keypoints_, expected_keypoints_) = sess.run(
[retained_keypoints,
self.expectedKeypointsAfterThresholding()])
self.assertAllClose(retained_keypoints_, expected_keypoints_)
def testRandomCropToAspectRatioWithCache(self):
preprocess_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testRunRandomCropToAspectRatioWithMasks(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True)
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
with mock.patch.object(preprocessor,
'_random_integer') as mock_random_integer:
mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_masks = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_masks_) = sess.run([
distorted_image, distorted_boxes, distorted_labels, distorted_masks
])
expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
self.assertAllEqual(distorted_labels_, [1])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllEqual(distorted_masks_.shape, [1, 200, 200])
def testRunRandomCropToAspectRatioWithKeypoints(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
with mock.patch.object(preprocessor,
'_random_integer') as mock_random_integer:
mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = sess.run([
distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints
])
expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
expected_keypoints = np.array(
[[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
self.assertAllEqual(distorted_labels_, [1])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllClose(distorted_keypoints_.flatten(),
expected_keypoints.flatten())
def testRandomPadToAspectRatioWithCache(self):
preprocess_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRunRandomPadToAspectRatioWithMinMaxPaddedSizeRatios(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map()
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio,
{'min_padded_size_ratio': (4.0, 4.0),
'max_padded_size_ratio': (4.0, 4.0)})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
with self.test_session() as sess:
distorted_image_, distorted_boxes_, distorted_labels_ = sess.run([
distorted_image, distorted_boxes, distorted_labels])
expected_boxes = np.array(
[[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]],
dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 800, 800, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
def testRunRandomPadToAspectRatioWithMasks(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True)
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_masks = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_masks_) = sess.run([
distorted_image, distorted_boxes, distorted_labels, distorted_masks
])
expected_boxes = np.array(
[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllEqual(distorted_masks_.shape, [2, 400, 400])
def testRunRandomPadToAspectRatioWithKeypoints(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = sess.run([
distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints
])
expected_boxes = np.array(
[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)
expected_keypoints = np.array([
[[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]],
[[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]],
], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllClose(distorted_keypoints_.flatten(),
expected_keypoints.flatten())
def testRandomPadImageWithCache(self):
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,}), (preprocessor.random_pad_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomPadImage(self):
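    """Tests that random_pad_image pads up to at most twice the input size."""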
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_pad_image, {})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
with self.test_session() as sess:
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_, boxes_, padded_boxes_) = sess.run(
[boxes_shape, padded_boxes_shape, images_shape,
padded_images_shape, boxes, padded_boxes])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
      self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
      self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
      self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
      self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
padded_boxes_[:, 2] - padded_boxes_[:, 0])))
self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
padded_boxes_[:, 3] - padded_boxes_[:, 1])))
def testRandomCropPadImageWithCache(self):
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,}), (preprocessor.random_crop_pad_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomCropPadImageWithRandomCoefOne(self):
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_pad_image, {
'random_coef': 1.0
})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
with self.test_session() as sess:
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_, boxes_, padded_boxes_) = sess.run(
[boxes_shape, padded_boxes_shape, images_shape,
padded_images_shape, boxes, padded_boxes])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
      self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
      self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
      self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
      self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
padded_boxes_[:, 2] - padded_boxes_[:, 0])))
self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
padded_boxes_[:, 3] - padded_boxes_[:, 1])))
def testRandomCropToAspectRatio(self):
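    """Tests cropping to an aspect ratio of 2.0, which halves the height."""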
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, [])
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {
'aspect_ratio': 2.0
})]
cropped_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
cropped_images = cropped_tensor_dict[fields.InputDataFields.image]
cropped_boxes = cropped_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
cropped_boxes_shape = tf.shape(cropped_boxes)
images_shape = tf.shape(images)
cropped_images_shape = tf.shape(cropped_images)
with self.test_session() as sess:
(boxes_shape_, cropped_boxes_shape_, images_shape_,
cropped_images_shape_) = sess.run([
boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape
])
self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
self.assertEqual(images_shape_[2], cropped_images_shape_[2])
def testRandomPadToAspectRatio(self):
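    """Tests padding to an aspect ratio of 2.0, which doubles the width."""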
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, [])
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {
'aspect_ratio': 2.0
})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
with self.test_session() as sess:
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_) = sess.run([
boxes_shape, padded_boxes_shape, images_shape, padded_images_shape
])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
self.assertEqual(images_shape_[1], padded_images_shape_[1])
self.assertEqual(2 * images_shape_[2], padded_images_shape_[2])
def testRandomBlackPatchesWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_black_patches, {
'size_to_image_ratio': 0.5
}))
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomBlackPatches(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_black_patches, {
'size_to_image_ratio': 0.5
}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
blacked_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
blacked_images = blacked_tensor_dict[fields.InputDataFields.image]
images_shape = tf.shape(images)
blacked_images_shape = tf.shape(blacked_images)
with self.test_session() as sess:
(images_shape_, blacked_images_shape_) = sess.run(
[images_shape, blacked_images_shape])
self.assertAllEqual(images_shape_, blacked_images_shape_)
def testRandomResizeMethodWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_resize_method, {
'target_size': (75, 150)
}))
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomResizeMethod(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_resize_method, {
'target_size': (75, 150)
}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
resized_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
resized_images = resized_tensor_dict[fields.InputDataFields.image]
resized_images_shape = tf.shape(resized_images)
expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)
with self.test_session() as sess:
(expected_images_shape_, resized_images_shape_) = sess.run(
[expected_images_shape, resized_images_shape])
self.assertAllEqual(expected_images_shape_,
resized_images_shape_)
def testResizeImageWithMasks(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeImageWithMasksTensorInputHeightAndWidth(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
height = tf.constant(50, dtype=tf.int32)
width = tf.constant(100, dtype=tf.int32)
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeImageWithNoInstanceMask(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangePreservesStaticSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
in_image = tf.random_uniform(in_shape)
out_image, _ = preprocessor.resize_to_range(
in_image, min_dimension=min_dim, max_dimension=max_dim)
self.assertAllEqual(out_image.get_shape().as_list(), expected_shape)
def testResizeToRangeWithDynamicSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
out_image, _ = preprocessor.resize_to_range(
in_image, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
with self.test_session() as sess:
out_image_shape = sess.run(out_image_shape,
feed_dict={in_image:
np.random.randn(*in_shape)})
self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self):
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]]
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
out_image, _ = preprocessor.resize_to_range(
in_image,
min_dimension=min_dim,
max_dimension=max_dim,
pad_to_max_dimension=True)
self.assertAllEqual(out_image.shape.as_list(), expected_shape)
out_image_shape = tf.shape(out_image)
with self.test_session() as sess:
out_image_shape = sess.run(
out_image_shape, feed_dict={in_image: np.random.randn(*in_shape)})
self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self):
in_image_np = np.array([[[0, 1, 2]]], np.float32)
ex_image_np = np.array(
[[[0, 1, 2], [123.68, 116.779, 103.939]],
[[123.68, 116.779, 103.939], [123.68, 116.779, 103.939]]], np.float32)
min_dim = 1
max_dim = 2
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
out_image, _ = preprocessor.resize_to_range(
in_image,
min_dimension=min_dim,
max_dimension=max_dim,
pad_to_max_dimension=True,
per_channel_pad_value=(123.68, 116.779, 103.939))
with self.test_session() as sess:
out_image_np = sess.run(out_image, feed_dict={in_image: in_image_np})
self.assertAllClose(ex_image_np, out_image_np)
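  # Arithmetic behind the expected tensor above (informal note): the 1x1 input
  # already satisfies min_dimension=1 / max_dimension=2, so no resizing occurs;
  # pad_to_max_dimension then grows the canvas to 2x2 and fills the three new
  # pixels with per_channel_pad_value (here the familiar ImageNet channel
  # means).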
def testResizeToRangeWithMasksPreservesStaticSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
self.assertAllEqual(out_masks.get_shape().as_list(), expected_mask_shape)
self.assertAllEqual(out_image.get_shape().as_list(), expected_image_shape)
def testResizeToRangeWithMasksAndPadToMaxDimension(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[100, 100, 3], [100, 100, 3]]
expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]]
for (in_image_shape,
expected_image_shape, in_masks_shape, expected_mask_shape) in zip(
in_image_shape_list, expected_image_shape_list,
in_masks_shape_list, expected_masks_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image,
in_masks,
min_dimension=min_dim,
max_dimension=max_dim,
pad_to_max_dimension=True)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape],
feed_dict={
in_image: np.random.randn(*in_image_shape),
in_masks: np.random.randn(*in_masks_shape)
})
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithMasksAndDynamicSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
      in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape],
feed_dict={
in_image: np.random.randn(*in_image_shape),
in_masks: np.random.randn(*in_masks_shape)
})
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithInstanceMasksTensorOfSizeZero(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRange4DImageTensor(self):
image = tf.random_uniform([1, 200, 300, 3])
with self.assertRaises(ValueError):
preprocessor.resize_to_range(image, 500, 600)
def testResizeToRangeSameMinMax(self):
"""Tests image resizing, checking output sizes."""
in_shape_list = [[312, 312, 3], [299, 299, 3]]
min_dim = 320
max_dim = 320
expected_shape_list = [[320, 320, 3], [320, 320, 3]]
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
in_image = tf.random_uniform(in_shape)
out_image, _ = preprocessor.resize_to_range(
in_image, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
with self.test_session() as sess:
out_image_shape = sess.run(out_image_shape)
self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToMinDimensionTensorShapes(self):
in_image_shape_list = [[60, 55, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 55], [10, 15, 30]]
min_dim = 50
expected_image_shape_list = [[60, 55, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
      in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
out_image, out_masks, _ = preprocessor.resize_to_min_dimension(
in_image, in_masks, min_dimension=min_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape],
feed_dict={
in_image: np.random.randn(*in_image_shape),
in_masks: np.random.randn(*in_masks_shape)
})
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
min_dim = 50
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_min_dimension(
in_image, in_masks, min_dimension=min_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMinDimensionRaisesErrorOn4DImage(self):
image = tf.random_uniform([1, 200, 300, 3])
with self.assertRaises(ValueError):
preprocessor.resize_to_min_dimension(image, 500)
def testScaleBoxesToPixelCoordinates(self):
"""Tests box scaling, checking scaled values."""
in_shape = [60, 40, 3]
in_boxes = [[0.1, 0.2, 0.4, 0.6],
[0.5, 0.3, 0.9, 0.7]]
expected_boxes = [[6., 8., 24., 24.],
[30., 12., 54., 28.]]
in_image = tf.random_uniform(in_shape)
in_boxes = tf.constant(in_boxes)
_, out_boxes = preprocessor.scale_boxes_to_pixel_coordinates(
in_image, boxes=in_boxes)
with self.test_session() as sess:
out_boxes = sess.run(out_boxes)
self.assertAllClose(out_boxes, expected_boxes)
def testScaleBoxesToPixelCoordinatesWithKeypoints(self):
"""Tests box and keypoint scaling, checking scaled values."""
in_shape = [60, 40, 3]
in_boxes = self.createTestBoxes()
in_keypoints = self.createTestKeypoints()
expected_boxes = [[0., 10., 45., 40.],
[15., 20., 45., 40.]]
expected_keypoints = [
[[6., 4.], [12., 8.], [18., 12.]],
[[24., 16.], [30., 20.], [36., 24.]],
]
in_image = tf.random_uniform(in_shape)
_, out_boxes, out_keypoints = preprocessor.scale_boxes_to_pixel_coordinates(
in_image, boxes=in_boxes, keypoints=in_keypoints)
with self.test_session() as sess:
out_boxes_, out_keypoints_ = sess.run([out_boxes, out_keypoints])
self.assertAllClose(out_boxes_, expected_boxes)
self.assertAllClose(out_keypoints_, expected_keypoints)
def testSubtractChannelMean(self):
"""Tests whether channel means have been subtracted."""
with self.test_session():
image = tf.zeros((240, 320, 3))
means = [1, 2, 3]
actual = preprocessor.subtract_channel_mean(image, means=means)
actual = actual.eval()
self.assertTrue((actual[:, :, 0] == -1).all())
self.assertTrue((actual[:, :, 1] == -2).all())
self.assertTrue((actual[:, :, 2] == -3).all())
def testOneHotEncoding(self):
"""Tests one hot encoding of multiclass labels."""
with self.test_session():
labels = tf.constant([1, 4, 2], dtype=tf.int32)
one_hot = preprocessor.one_hot_encoding(labels, num_classes=5)
one_hot = one_hot.eval()
self.assertAllEqual([0, 1, 1, 0, 1], one_hot)
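    # Note: one_hot_encoding collapses the whole label list into a single
    # k-hot indicator vector (indices 1, 2 and 4 set for labels [1, 4, 2]),
    # rather than producing one row per label.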
def testSSDRandomCropWithCache(self):
preprocess_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testSSDRandomCrop(self):
preprocessing_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropWithMultiClassScores(self):
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}), (preprocessor.ssd_random_crop, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
multiclass_scores = self.createTestMultiClassScores()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.multiclass_scores: multiclass_scores,
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_multiclass_scores=True)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_multiclass_scores = distorted_tensor_dict[
fields.InputDataFields.multiclass_scores]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
multiclass_scores_rank = tf.rank(multiclass_scores)
distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_, multiclass_scores_rank_,
distorted_multiclass_scores_,
distorted_multiclass_scores_rank_) = sess.run([
boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
distorted_images_rank, multiclass_scores_rank,
distorted_multiclass_scores, distorted_multiclass_scores_rank
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
self.assertAllEqual(multiclass_scores_rank_,
distorted_multiclass_scores_rank_)
self.assertAllEqual(distorted_boxes_.shape[0],
distorted_multiclass_scores_.shape[0])
def testSSDRandomCropPad(self):
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
preprocessing_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop_pad, {})]
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run([
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropFixedAspectRatioWithCache(self):
preprocess_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop_fixed_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def _testSSDRandomCropFixedAspectRatio(self,
include_label_scores,
include_multiclass_scores,
include_instance_masks,
include_keypoints):
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})]
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
if include_label_scores:
label_scores = self.createTestLabelScores()
tensor_dict[fields.InputDataFields.groundtruth_label_scores] = (
label_scores)
if include_multiclass_scores:
multiclass_scores = self.createTestMultiClassScores()
tensor_dict[fields.InputDataFields.multiclass_scores] = (
multiclass_scores)
if include_instance_masks:
masks = self.createTestMasks()
tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks
if include_keypoints:
keypoints = self.createTestKeypoints()
tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=include_label_scores,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropFixedAspectRatio(self):
self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
include_multiclass_scores=False,
include_instance_masks=False,
include_keypoints=False)
def testSSDRandomCropFixedAspectRatioWithMultiClassScores(self):
self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
include_multiclass_scores=True,
include_instance_masks=False,
include_keypoints=False)
def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self):
self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
include_multiclass_scores=False,
include_instance_masks=True,
include_keypoints=True)
def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self):
self._testSSDRandomCropFixedAspectRatio(include_label_scores=True,
include_multiclass_scores=False,
include_instance_masks=True,
include_keypoints=True)
def testConvertClassLogitsToSoftmax(self):
multiclass_scores = tf.constant(
[[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32)
temperature = 2.0
converted_multiclass_scores = (
preprocessor.convert_class_logits_to_softmax(
multiclass_scores=multiclass_scores, temperature=temperature))
expected_converted_multiclass_scores = [[[0.62245935, 0.37754068],
[0.5, 0.5], [1, 0]]]
with self.test_session() as sess:
(converted_multiclass_scores_) = sess.run([converted_multiclass_scores])
self.assertAllClose(converted_multiclass_scores_,
expected_converted_multiclass_scores)
if __name__ == '__main__':
tf.test.main()
| 43.949076
| 80
| 0.670331
| 13,990
| 126,002
| 5.705647
| 0.036383
| 0.032948
| 0.032071
| 0.062138
| 0.863697
| 0.847097
| 0.820626
| 0.791361
| 0.766355
| 0.744995
| 0
| 0.037772
| 0.228893
| 126,002
| 2,866
| 81
| 43.96441
| 0.783771
| 0.011651
| 0
| 0.730678
| 0
| 0
| 0.018724
| 0.002772
| 0
| 0
| 0
| 0
| 0.073344
| 1
| 0.055205
| false
| 0
| 0.002366
| 0.004338
| 0.071767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 6b39024d151db2f0f1396703f2c340fd15b9b1f6
| 19,627
| py
| Python
| test/commands/extended/get_bundles_test.py
| Cornode/cornode.lib.py
| 866230123a62acc235ca8f46e7b59fe08655049b
| ["MIT"] | null | null | null | test/commands/extended/get_bundles_test.py
| Cornode/cornode.lib.py
| 866230123a62acc235ca8f46e7b59fe08655049b
| ["MIT"] | null | null | null | test/commands/extended/get_bundles_test.py
| Cornode/cornode.lib.py
| 866230123a62acc235ca8f46e7b59fe08655049b
| ["MIT"] | null | null | null |
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
import filters as f
from filters.test import BaseFilterTestCase
from cornode import Address, BadApiResponse, Bundle, BundleHash, Fragment, Hash, \
cornode, Tag, Transaction, TransactionHash, TransactionTrytes
from cornode.adapter import MockAdapter
from cornode.commands.extended.get_bundles import GetBundlesCommand
from cornode.filters import Trytes
from six import binary_type, text_type
class GetBundlesRequestFilterTestCase(BaseFilterTestCase):
filter_type = GetBundlesCommand(MockAdapter()).get_request_filter
skip_value_check = True
def setUp(self):
super(GetBundlesRequestFilterTestCase, self).setUp()
# noinspection SpellCheckingInspection
self.transaction = (
b'ORLSCIMM9ZONOUSPYYWLOEMXQZLYEHCBEDQSHZOG'
b'OPZCZCDZYTDPGEEUXWUZ9FQYCT9OGS9PICOOX9999'
)
def test_pass_happy_path(self):
"""
Request is valid.
"""
request = {
'transaction': TransactionHash(self.transaction)
}
filter_ = self._filter(request)
self.assertFilterPasses(filter_)
self.assertDictEqual(filter_.cleaned_data, request)
def test_pass_compatible_types(self):
"""
Request contains values that can be converted to the expected
types.
"""
filter_ = self._filter({
# Any TrytesCompatible value will work here.
'transaction': binary_type(self.transaction),
})
self.assertFilterPasses(filter_)
self.assertDictEqual(
filter_.cleaned_data,
{
'transaction': TransactionHash(self.transaction),
},
)
def test_fail_empty(self):
"""
Request is empty.
"""
self.assertFilterErrors(
{},
{
'transaction': [f.FilterMapper.CODE_MISSING_KEY],
},
)
def test_fail_unexpected_parameters(self):
"""
Request contains unexpected parameters.
"""
self.assertFilterErrors(
{
'transaction': TransactionHash(self.transaction),
# SAY "WHAT" AGAIN!
'what': 'augh!',
},
{
'what': [f.FilterMapper.CODE_EXTRA_KEY],
},
)
def test_fail_transaction_wrong_type(self):
"""
``transaction`` is not a TrytesCompatible value.
"""
self.assertFilterErrors(
{
'transaction': text_type(self.transaction, 'ascii'),
},
{
'transaction': [f.Type.CODE_WRONG_TYPE],
},
)
def test_fail_transaction_not_trytes(self):
"""
``transaction`` contains invalid characters.
"""
self.assertFilterErrors(
{
'transaction': b'not valid; must contain only uppercase and "9"',
},
{
'transaction': [Trytes.CODE_NOT_TRYTES],
},
)
# noinspection SpellCheckingInspection
class GetBundlesCommandTestCase(TestCase):
def setUp(self):
super(GetBundlesCommandTestCase, self).setUp()
self.adapter = MockAdapter()
self.command = GetBundlesCommand(self.adapter)
def test_wireup(self):
"""
Verifies that the command is wired up correctly.
"""
self.assertIsInstance(
cornode(self.adapter).getBundles,
GetBundlesCommand,
)
def test_single_transaction(self):
"""
Getting a bundle that contains a single transaction.
"""
transaction =\
Transaction(
current_index = 0,
last_index = 0,
tag = Tag(b''),
timestamp = 1484960990,
value = 0,
# These values are not relevant for 0-value transactions.
nonce = Hash(b''),
signature_message_fragment = Fragment(b''),
# This value is computed automatically, so it has to be real.
hash_ =
TransactionHash(
b'TAOICZV9ZSXIZINMNRLOLCWNLL9IDKGVWTJITNGU'
b'HAIKLHZLBZWOQ9HJSODUDISTYGIYPWTYDCFMVRBQN'
),
address =
Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999OCSGVF'
b'IBQA99KGTCPCZ9NHR9VGLGADDDIEGGPCGBDEDDTBC'
),
bundle_hash =
BundleHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999DIOAZD'
b'M9AIUHXGVGBC9EMGI9SBVBAIXCBFJ9EELCPDRAD9U'
),
branch_transaction_hash =
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION99999BBCEDI'
b'ZHUDWBYDJEXHHAKDOCKEKDFIMB9AMCLFW9NBDEOFV'
),
trunk_transaction_hash =
TransactionHash(
b'TESTVALUE9DONTUSEINPRODUCTION999999ARAYA'
b'MHCB9DCFEIWEWDLBCDN9LCCBQBKGDDAECFIAAGDAS'
),
)
self.adapter.seed_response('getTrytes', {
'trytes': [transaction.as_tryte_string()],
})
response = self.command(transaction=transaction.hash)
bundle = response['bundles'][0] # type: Bundle
self.assertEqual(len(bundle), 1)
self.maxDiff = None
self.assertDictEqual(
bundle[0].as_json_compatible(),
transaction.as_json_compatible(),
)
def test_multiple_transactions(self):
"""
Getting a bundle that contains multiple transactions.
"""
bundle = Bundle.from_tryte_strings([
TransactionTrytes(
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999WUQXEGBVIECGIWO9IGSYKWWPYCIVUJJGSJPWGIAFJPYSF9NSQOHWAHS9P'
b'9PWQHOBXNNQIF9IRHVQXKPZW999999999999999999999999999999999999999999'
b'999999999999HNLFMVD99A99999999A99999999PDQWLVVDPUU9VIBODGMRIAZPGQX'
b'DOGSEXIHKIBWSLDAWUKZCZMK9Z9YZSPCKBDJSVDPRQLJSTKUMTNVSXBGUEHHGAIWWQ'
b'BCJZHZAQOWZMAIDAFUZBVMUVPWQJLUGGQKNKLMGTWXXNZKUCBJLEDAMYVRGABAWBY9'
b'999MYIYBTGIOQYYZFJBLIAWMPSZEFFTXUZPCDIXSLLQDQSFYGQSQOGSPKCZNLVSZ9L'
b'MCUWVNGEN9EJEW9999XZUIENOTTBKJMDPRXWGQYG9PWGTXUO9AXMP9FLMDRMADLRPW'
b'CZCJBROYCDRJMYU9HDYJM9NDBFUPIZVTR'
),
# Well, it was bound to happen sooner or later... the ASCII
# representation of this tryte sequence contains a very naughty
# phrase. But I don't feel like doing another POW, so... enjoy.
TransactionTrytes(
b'NBTCPCFDEACCPCBDVC9DTCQAJ9RBTC9D9DCDQAEAKDCDFD9DSCFAJ9VBCDJDTCQAJ9'
b'ZBMDYBCCKB99999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999HNLFMVD99999999999A99999999PDQWLVVDPUU9VIBODGMRIAZPGQX'
b'DOGSEXIHKIBWSLDAWUKZCZMK9Z9YZSPCKBDJSVDPRQLJSTKUMTNVSXFSEWUNJOEGNU'
b'I9QOCRFMYSIFAZLJHKZBPQZZYFG9ORYCRDX9TOMJPFCRB9R9KPUUGFPVOWYXFIWEW9'
b'999BGUEHHGAIWWQBCJZHZAQOWZMAIDAFUZBVMUVPWQJLUGGQKNKLMGTWXXNZKUCBJL'
b'EDAMYVRGABAWBY9999SYRABNN9JD9PNDLIKUNCECUELTOQZPSBDILVHJQVCEOICFAD'
b'YKZVGMOAXJRQNTCKMHGTAUMPGJJMX9LNF'
),
])
for txn in bundle:
self.adapter.seed_response('getTrytes', {
'trytes': [txn.as_tryte_string()],
})
self.adapter.seed_response('getTrytes', {
'trytes': [
'SPAMSPAMSPAM999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999999999999999999'
'999999999999999999999999999999999999999999999999999JECDITWO9999999'
'999999999999ONLFMVD99999999999999999999VVCHSQSRVFKSBONDWB9EAQEMQOY'
'YRBIZHTBJLYNAVDHZPUZAZ9LYHXWKBEJ9IPR9FAMFLT9EEOHVYWUPRHHSRCILCLWFD'
'GBYBFFOKMCSAPVD9VGZZRRGBLGMZMXD9RMZQDBLMGN9BATWZGULRBCYQEIKIRBPHC9'
'999KTLTRSYOWBD9HVNP9GCUABARNGMYXUZKXWRPGOPETZLKYYC9Z9EYXIWVARUBMBM'
'BPXGORN9WPBLY99999ZRBVQWULRFXDNDYZKRKIXPZQT9JJJH9FZU9PVWZJWLXBPODP'
'EHMKTTAGEPLPHUQCZNLDSHERONOMHJCOI'
],
})
response = self.command(
transaction =
TransactionHash(
b'TOYJPHKMLQNDVLDHDILARUJCCIUMQBLUSWPCTIVA'
b'DRXICGYDGSVPXFTILFFGAPICYHGGJ9OHXINFX9999'
),
)
self.maxDiff = None
self.assertListEqual(
response['bundles'][0].as_json_compatible(),
bundle.as_json_compatible(),
)
def test_non_tail_transaction(self):
"""
Trying to get a bundle for a non-tail transaction.
This is not valid; you have to start with a tail transaction.
"""
self.adapter.seed_response('getTrytes', {
'trytes': [
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999999999999999999999999999999999999999999999999999999999999'
b'999999999WUQXEGBVIECGIWO9IGSYKWWPYCIVUJJGSJPWGIAFJPYSF9NSQOHWAHS9P'
b'9PWQHOBXNNQIF9IRHVQXKPZW999999999999999999999999999999999999999999'
b'999999999999HNLFMVD99A99999999A99999999PDQWLVVDPUU9VIBODGMRIAZPGQX'
b'DOGSEXIHKIBWSLDAWUKZCZMK9Z9YZSPCKBDJSVDPRQLJSTKUMTNVSXBGUEHHGAIWWQ'
b'BCJZHZAQOWZMAIDAFUZBVMUVPWQJLUGGQKNKLMGTWXXNZKUCBJLEDAMYVRGABAWBY9'
b'999MYIYBTGIOQYYZFJBLIAWMPSZEFFTXUZPCDIXSLLQDQSFYGQSQOGSPKCZNLVSZ9L'
b'MCUWVNGEN9EJEW9999XZUIENOTTBKJMDPRXWGQYG9PWGTXUO9AXMP9FLMDRMADLRPW'
b'CZCJBROYCDRJMYU9HDYJM9NDBFUPIZVTR'
],
})
with self.assertRaises(BadApiResponse):
self.command(
transaction =
TransactionHash(
b'FSEWUNJOEGNUI9QOCRFMYSIFAZLJHKZBPQZZYFG9'
b'ORYCRDX9TOMJPFCRB9R9KPUUGFPVOWYXFIWEW9999'
),
)
def test_missing_transaction(self):
"""
Unable to find the requested transaction.
"""
self.adapter.seed_response('getTrytes', {'trytes': []})
with self.assertRaises(BadApiResponse):
self.command(
transaction =
TransactionHash(
b'FSEWUNJOEGNUI9QOCRFMYSIFAZLJHKZBPQZZYFG9'
b'ORYCRDX9TOMJPFCRB9R9KPUUGFPVOWYXFIWEW9999'
),
)
| 44.913043
| 82
| 0.778061
| 890
| 19,627
| 17.053933
| 0.261798
| 0.437014
| 0.443537
| 0.847543
| 0.699038
| 0.693504
| 0.68204
| 0.675583
| 0.66715
| 0.66715
| 0
| 0.57222
| 0.172467
| 19,627
| 436
| 83
| 45.016055
| 0.362271
| 0.051511
| 0
| 0.593023
| 0
| 0
| 0.633301
| 0.619629
| 0
| 1
| 0
| 0
| 0.040698
| 1
| 0.037791
| false
| 0.011628
| 0.026163
| 0
| 0.075581
| 0.002907
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 8626a8d826cba371b33c9abd050faaa6598d82cb
| 1,120
| py
| Python
| triple_walk/rw.py
| udel-cbcb/triple_walk
| 8d0ea465784e34d1fce2cc83b99b3cbb1d60ab24
| ["MIT"] | null | null | null | triple_walk/rw.py
| udel-cbcb/triple_walk
| 8d0ea465784e34d1fce2cc83b99b3cbb1d60ab24
| ["MIT"] | null | null | null | triple_walk/rw.py
| udel-cbcb/triple_walk
| 8d0ea465784e34d1fce2cc83b99b3cbb1d60ab24
| ["MIT"] | null | null | null |
import triple_walk_native
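# Thin Python-side wrappers: each function below forwards its arguments to the
# compiled ``triple_walk_native`` extension, which implements the actual walk
# generation and window construction.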
def walk_triples(triples_indexed, relation_tail_index,target_nodes, walk_length,padding_idx,seed,restart=True):
return triple_walk_native.walk_triples(triples_indexed,
relation_tail_index,
target_nodes,
walk_length,
padding_idx,
restart,
seed
)
def to_windows_cbow(walks, window_size, num_nodes,seed):
return triple_walk_native.to_windows_cbow(walks, window_size, num_nodes,seed)
def to_windows_triples_sg(walks, window_size, num_nodes,padding_idx,triples,seed):
return triple_walk_native.to_windows_triples(walks, window_size,num_nodes,padding_idx,triples,seed)
def to_windows_triples_cbow(walks, window_size, num_nodes,padding_idx,triples,seed):
return triple_walk_native.to_windows_triples_cbow(walks, window_size,num_nodes,padding_idx,triples,seed)
| 50.909091
| 111
| 0.60625
| 124
| 1,120
| 5.032258
| 0.217742
| 0.096154
| 0.144231
| 0.173077
| 0.875
| 0.839744
| 0.839744
| 0.804487
| 0.804487
| 0.605769
| 0
| 0
| 0.3375
| 1,120
| 21
| 112
| 53.333333
| 0.84097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.0625
| 0.25
| 0.5625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 9
| 862c8717b0399b8bb6d61e3c1e39804fb752267a
| 1,573
| py
| Python
| training/kickoff/kickoff_exercises.py
| NoMoor/83Plus
| 5cb72871ed33c9484c5699496db106f24338564e
| ["MIT"] | null | null | null | training/kickoff/kickoff_exercises.py
| NoMoor/83Plus
| 5cb72871ed33c9484c5699496db106f24338564e
| ["MIT"] | 5
| 2019-12-27T15:04:48.000Z
| 2020-03-06T17:36:41.000Z
| training/kickoff/kickoff_exercises.py
| NoMoor/83Plus
| 5cb72871ed33c9484c5699496db106f24338564e
| ["MIT"] | null | null | null |
from kickoff.kickoff_training import KickOff, KickOff1v1, KickOffOrange
from math import pi
kickoff_exercises = [
# KickOff('Center Kickoff', car_start_x=0, car_start_y=-4608, car_yaw=(.5 * pi)),
# KickOff('Left Center Kickoff', car_start_x=256, car_start_y=-3840, car_yaw=(.5 * pi)),
# KickOff('Right Center Kickoff', car_start_x=-256, car_start_y=-3840, car_yaw=(.5 * pi)),
KickOff('Left Kickoff', car_start_x=2048, car_start_y=-2560, car_yaw=(.75 * pi)),
# KickOff('Right Kickoff', car_start_x=-2048, car_start_y=-2560, car_yaw=(.25 * pi)),
]
kickoff_orange_exercises = [
# KickOffOrange('Center Kickoff', car_start_x=0, car_start_y=-4608, car_yaw=(.5 * pi)),
# KickOffOrange('Left Center Kickoff', car_start_x=256, car_start_y=-3840, car_yaw=(.5 * pi)),
# KickOffOrange('Right Center Kickoff', car_start_x=-256, car_start_y=-3840, car_yaw=(.5 * pi)),
# KickOffOrange('Left Kickoff', car_start_x=2048, car_start_y=-2560, car_yaw=(.75 * pi)),
KickOffOrange('Right Kickoff', car_start_x=-2048, car_start_y=-2560, car_yaw=(.25 * pi)),
]
kickoff_1v1_exercises = [
# KickOff1v1('Center Kickoff', car_start_x=0, car_start_y=-4608, car_yaw=(.5 * pi)),
# KickOff1v1('Left Center Kickoff', car_start_x=256, car_start_y=-3840, car_yaw=(.5 * pi)),
# KickOff1v1('Right Center Kickoff', car_start_x=-256, car_start_y=-3840, car_yaw=(.5 * pi)),
# KickOff1v1('Left Kickoff', car_start_x=2048, car_start_y=-2560, car_yaw=(.75 * pi)),
KickOff1v1('Right Kickoff', car_start_x=-2048, car_start_y=-2560, car_yaw=(.25 * pi)),
]
| 58.259259
| 100
| 0.696122
| 251
| 1,573
| 4.039841
| 0.111554
| 0.236686
| 0.221893
| 0.236686
| 0.821499
| 0.821499
| 0.791913
| 0.791913
| 0.791913
| 0.791913
| 0
| 0.102564
| 0.132231
| 1,573
| 26
| 101
| 60.5
| 0.640293
| 0.66815
| 0
| 0
| 0
| 0
| 0.074656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 8646230ef5207bae6126a4c65a3d70d471b04e34
| 16,100
| py
| Python
| viewer/command/command.py
| Lukasz1928/DICOM-viewer
| 778541d85c6e6a96e90c9d1050f3dec2b8387b5d
| ["MIT"] | null | null | null | viewer/command/command.py
| Lukasz1928/DICOM-viewer
| 778541d85c6e6a96e90c9d1050f3dec2b8387b5d
| ["MIT"] | null | null | null | viewer/command/command.py
| Lukasz1928/DICOM-viewer
| 778541d85c6e6a96e90c9d1050f3dec2b8387b5d
| ["MIT"] | null | null | null |
import math
from abc import ABC
from viewer.command.status import CommandStatus
from viewer.math.utils import vectors_differ, radians_to_degrees, points_to_vector, normalize_vector, sum_vectors, \
vector_length, vectors_angle
class Command(ABC):
def execute(self):
pass
def undo(self):
pass
class ComplexCommand(Command):
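    # Composite command: child commands execute in insertion order and undo in
    # reverse order, so a multi-step drawing operation rolls back cleanly.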
def __init__(self, canvas, commands=None):
self.canvas = canvas
if commands is None:
commands = []
self.commands = commands
def execute(self):
for c in self.commands:
c.execute()
def add_command(self, command, execute=True):
self.commands.append(command)
if execute:
command.execute()
def undo(self):
for c in reversed(self.commands):
c.undo()
class CurveCommand(ComplexCommand):
def __init__(self, canvas, color):
ComplexCommand.__init__(self, canvas)
self.color = color
self.prev_point = None
def add_point(self, point, final=False):
if self.prev_point is not None:
dc = LineCommand(self.canvas, self.prev_point, point, self.color)
dc.execute()
self.commands.append(dc)
self.prev_point = point
return CommandStatus.SUCCESS if final else CommandStatus.IN_PROGRESS
class TextCommand(Command):
def __init__(self, canvas, text, color, location):
self.id = None
self.canvas = canvas
self.text = text
self.color = color
self.location = location
def execute(self):
self.id = self.canvas.create_text(self.location[0], self.location[1], text=self.text, fill=self.color)
def undo(self):
self.canvas.delete(self.id)
class LineCommand(Command):
def __init__(self, canvas, start_point, end_point, color):
self.id = None
self.start_point = start_point
self.end_point = end_point
self.canvas = canvas
self.color = color
def execute(self):
self.id = self.canvas.create_line(self.start_point[0], self.start_point[1],
self.end_point[0], self.end_point[1], fill=self.color, width=3)
def undo(self):
self.canvas.delete(self.id)
class AngleCommand(ComplexCommand):
def __init__(self, canvas, color, pixel_spacing, rescale_factor, with_measurement=True):
ComplexCommand.__init__(self, canvas)
self.color = color
self.points = []
self.confirmed = 0
self.pixel_spacing = pixel_spacing
self.rescale_factor = rescale_factor
self.measure = with_measurement
def add_point(self, point, final=False):
if final:
if len(self.points) > self.confirmed:
self.points.pop()
self.commands.pop().undo()
self.points.append(point)
self.confirmed += 1
if len(self.points) in [2, 3]:
command = LineCommand(self.canvas, self.points[len(self.points) - 2],
self.points[len(self.points) - 1], self.color)
command.execute()
self.commands.append(command)
else:
if len(self.points) > self.confirmed:
self.points.pop()
self.commands.pop().undo()
self.points.append(point)
if len(self.points) in [2, 3]:
command = LineCommand(self.canvas, self.points[len(self.points) - 2],
self.points[len(self.points) - 1], self.color)
command.execute()
self.commands.append(command)
status = self._get_execution_status(final)
if status == CommandStatus.SUCCESS and self.measure:
angle = self._calculate_angle()
self._print_angle_label(angle)
return status
def _is_correct(self):
return len(self.points) == 3 and vectors_differ(self.points[0], self.points[1]) and vectors_differ(
self.points[1], self.points[2])
def _get_execution_status(self, final):
if final and self._is_correct():
return CommandStatus.SUCCESS
if final and len(self.points) == 3 and not self._is_correct():
return CommandStatus.FAIL
return CommandStatus.IN_PROGRESS
def _calculate_angle(self):
p1, p2, p3 = self.points[0], self.points[1], self.points[2]
v1 = ((p1[0] - p2[0]) * self.pixel_spacing[0] / self.rescale_factor[0],
(p1[1] - p2[1]) * self.pixel_spacing[1] / self.rescale_factor[1])
v2 = ((p3[0] - p2[0]) * self.pixel_spacing[0] / self.rescale_factor[0],
(p3[1] - p2[1]) * self.pixel_spacing[1] / self.rescale_factor[1])
angle = vectors_angle(v1, v2)
deg_angle = round(radians_to_degrees(angle), 2)
return deg_angle
def _print_angle_label(self, angle):
loc = self._calculate_label_location()
text_command = TextCommand(self.canvas, angle, self.color, loc)
text_command.execute()
self.commands.append(text_command)
def _calculate_label_location(self):
distance = 10
v1, v2 = points_to_vector(self.points[1], self.points[0]), points_to_vector(self.points[1], self.points[2])
norm_v1, norm_v2 = normalize_vector(v1), normalize_vector(v2)
norm_label_vector = normalize_vector(sum_vectors(norm_v1, norm_v2)) if vector_length(
sum_vectors(norm_v1, norm_v2)) != 0 else (1.0 / math.sqrt(2.0), 1.0 / math.sqrt(2.0))
dx = distance * norm_label_vector[0]
dy = distance * norm_label_vector[1]
loc = sum_vectors(self.points[1], (dx, dy))
return loc
class RectangleCommand(ComplexCommand):
def __init__(self, canvas, color, pixel_spacing, rescale_factor, with_measurement=True):
ComplexCommand.__init__(self, canvas)
self.color = color
self.points = []
self.confirmed = 0
self.pixel_spacing = pixel_spacing
self.rescale_factor = rescale_factor
self.measure = with_measurement
class RectCommand(Command):
def __init__(self, canvas, point1, point2, color):
self.canvas = canvas
self.color = color
self.point1 = point1
self.point2 = point2
self.id = None
def execute(self):
self.id = self.canvas.create_rectangle(self.point1[0], self.point1[1], self.point2[0], self.point2[1],
outline=self.color, width=3)
def undo(self):
self.canvas.delete(self.id)
def add_point(self, point, final=False):
if final:
if len(self.points) > self.confirmed:
self.points.pop()
self.commands.pop().undo()
self.points.append(point)
self.confirmed += 1
if len(self.points) == 2:
command = self.RectCommand(self.canvas, self.points[0], self.points[1], self.color)
command.execute()
self.commands.append(command)
else:
if len(self.points) > self.confirmed:
self.points.pop()
self.commands.pop().undo()
self.points.append(point)
if len(self.points) == 2:
command = self.RectCommand(self.canvas, self.points[0], self.points[1], self.color)
command.execute()
self.commands.append(command)
status = self._get_execution_status(final)
if status == CommandStatus.SUCCESS and self.measure:
self._print_label()
return status
def _is_correct(self):
return len(self.points) == 2 and abs(self.points[0][0] - self.points[1][0]) > 0 and abs(
self.points[0][1] - self.points[1][1]) > 0
def _get_execution_status(self, final):
if final and self._is_correct():
return CommandStatus.SUCCESS
if final and len(self.points) == 2 and not self._is_correct():
return CommandStatus.FAIL
return CommandStatus.IN_PROGRESS
def _calculate_label_location(self):
dx = 0
dy = 20
x, y = max([x[0] for x in self.points]) + dx, max([x[1] for x in self.points]) + dy
if x >= self.canvas.winfo_width():
x = x - 2 * dx
        if y >= self.canvas.winfo_height():
y = y - 2 * dy
return x, y
def _print_label(self):
loc = self._calculate_label_location()
area = round(self._calculate_area(), 2)
perimeter = round(self._calculate_perimeter(), 2)
text = "Area: {} mm2\nPerim.: {} mm".format(area, perimeter)
text_command = TextCommand(self.canvas, text, self.color, loc)
text_command.execute()
self.commands.append(text_command)
def _calculate_area(self):
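        # Pixel extents are converted to millimetres: pixel_spacing appears to
        # be the DICOM mm-per-pixel value and rescale_factor the current
        # on-screen zoom (a reading of the surrounding code, not a documented
        # contract).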
width = abs(self.points[0][0] - self.points[1][0]) * self.pixel_spacing[0] / self.rescale_factor[0]
height = abs(self.points[0][1] - self.points[1][1]) * self.pixel_spacing[1] / self.rescale_factor[1]
return width * height
def _calculate_perimeter(self):
width = abs(self.points[0][0] - self.points[1][0]) * self.pixel_spacing[0] / self.rescale_factor[0]
height = abs(self.points[0][1] - self.points[1][1]) * self.pixel_spacing[1] / self.rescale_factor[1]
return 2 * (width + height)
class EllipseCommand(ComplexCommand):
def __init__(self, canvas, color, pixel_spacing, rescale_factor, with_measurement=True):
ComplexCommand.__init__(self, canvas)
self.color = color
self.points = []
self.confirmed = 0
self.pixel_spacing = pixel_spacing
self.rescale_factor = rescale_factor
self.measure = with_measurement
class OvalCommand(Command):
def __init__(self, canvas, point1, point2, color):
self.canvas = canvas
self.color = color
self.point1 = point1
self.point2 = point2
self.id = None
def execute(self):
self.id = self.canvas.create_oval(self.point1[0], self.point1[1], self.point2[0], self.point2[1],
outline=self.color, width=3)
def undo(self):
self.canvas.delete(self.id)
def add_point(self, point, final=False):
if final:
if len(self.points) > self.confirmed:
self.points.pop()
self.commands.pop().undo()
self.points.append(point)
self.confirmed += 1
if len(self.points) == 2:
command = self.OvalCommand(self.canvas, self.points[0], self.points[1], self.color)
command.execute()
self.commands.append(command)
else:
if len(self.points) > self.confirmed:
self.points.pop()
self.commands.pop().undo()
self.points.append(point)
if len(self.points) == 2:
command = self.OvalCommand(self.canvas, self.points[0], self.points[1], self.color)
command.execute()
self.commands.append(command)
status = self._get_execution_status(final)
if status == CommandStatus.SUCCESS and self.measure:
self._print_label()
return status
def _calculate_label_location(self):
dx = 0
dy = 20
x, y = max([x[0] for x in self.points]) + dx, max([x[1] for x in self.points]) + dy
if x >= self.canvas.winfo_width():
x = x - 2 * dx
        if y >= self.canvas.winfo_height():
y = y - 2 * dy
return x, y
def _is_correct(self):
return len(self.points) == 2 and abs(self.points[0][0] - self.points[1][0]) > 0 and abs(
self.points[0][1] - self.points[1][1]) > 0
def _get_execution_status(self, final):
if final and self._is_correct():
return CommandStatus.SUCCESS
if final and len(self.points) == 2 and not self._is_correct():
return CommandStatus.FAIL
return CommandStatus.IN_PROGRESS
def _print_label(self):
loc = self._calculate_label_location()
area = round(self._calculate_area(), 2)
perimeter = round(self._calculate_perimeter(), 2)
text = "Area: {} mm2\nPerim.: {} mm".format(area, perimeter)
text_command = TextCommand(self.canvas, text, self.color, loc)
text_command.execute()
self.commands.append(text_command)
def _calculate_area(self):
width, height = self._calculate_dimensions()
return width * height * math.pi
def _calculate_perimeter(self):
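        # Ramanujan's second approximation for the ellipse perimeter:
        # P ~= pi * (a + b) * (1 + 3h / (10 + sqrt(4 - 3h))),
        # with h = ((a - b) / (a + b))**2 and a, b the semi-axes.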
width, height = self._calculate_dimensions()
h = ((width - height) ** 2) / ((width + height) ** 2)
return math.pi * (width + height) * (1 + (3 * h) / (10 + math.sqrt(4 - 3 * h)))
def _calculate_dimensions(self):
width = abs(self.points[0][0] - self.points[1][0]) / 2.0 * self.pixel_spacing[0] / self.rescale_factor[0]
height = abs(self.points[0][1] - self.points[1][1]) / 2.0 * self.pixel_spacing[1] / self.rescale_factor[1]
return width, height
class DistanceCommand(ComplexCommand):
def __init__(self, canvas, color, pixel_spacing, rescale_factor, with_measurement=True):
ComplexCommand.__init__(self, canvas)
self.color = color
self.points = []
self.confirmed = 0
self.pixel_spacing = pixel_spacing
self.rescale_factor = rescale_factor
self.measure = with_measurement
def add_point(self, point, final=False):
if final:
if len(self.points) > self.confirmed:
self.points.pop()
self.commands.pop().undo()
self.points.append(point)
self.confirmed += 1
if len(self.points) == 2:
command = LineCommand(self.canvas, self.points[0], self.points[1], self.color)
command.execute()
self.commands.append(command)
else:
if len(self.points) > self.confirmed:
self.points.pop()
self.commands.pop().undo()
self.points.append(point)
if len(self.points) == 2:
command = LineCommand(self.canvas, self.points[0], self.points[1], self.color)
command.execute()
self.commands.append(command)
status = self._get_execution_status(final)
if status == CommandStatus.SUCCESS and self.measure:
self._print_label()
return status
def _is_correct(self):
return len(self.points) == 2 and vector_length(points_to_vector(self.points[0], self.points[1])) > 1
def _get_execution_status(self, final):
if final and self._is_correct():
return CommandStatus.SUCCESS
if final and len(self.points) == 2 and not self._is_correct():
return CommandStatus.FAIL
return CommandStatus.IN_PROGRESS
def _calculate_label_location(self):
x_r, y_r = max(self.points, key=lambda p: p[0])
x_l, y_l = min(self.points, key=lambda p: p[0])
if x_l == x_r or (y_r - y_l) / (x_r - x_l) < -0.5:
dx = 45
dy = 10
elif (y_r - y_l) / (x_r - x_l) > 0:
dx = 0
dy = 10
else:
dx = 0
dy = -10
return x_r + dx, y_r + dy
def _print_label(self):
loc = self._calculate_label_location()
length = round(self._calculate_length(), 2)
text = "Length: {} mm".format(length)
text_command = TextCommand(self.canvas, text, self.color, loc)
text_command.execute()
self.commands.append(text_command)
def _calculate_length(self):
dx = (self.points[0][0] - self.points[1][0]) * self.pixel_spacing[0] / self.rescale_factor[0]
dy = (self.points[0][1] - self.points[1][1]) * self.pixel_spacing[1] / self.rescale_factor[1]
return vector_length((dx, dy))
| 38.424821
| 116
| 0.589814
| 2,016
| 16,100
| 4.541667
| 0.068452
| 0.117955
| 0.039755
| 0.026212
| 0.815094
| 0.77927
| 0.749235
| 0.74159
| 0.717562
| 0.705876
| 0
| 0.023352
| 0.292484
| 16,100
| 418
| 117
| 38.516746
| 0.780441
| 0
| 0
| 0.738764
| 0
| 0
| 0.004161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.143258
| false
| 0.005618
| 0.011236
| 0.011236
| 0.275281
| 0.022472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 864ecc5e4644faf5114c365ecd8fc3c9a8bbb8b8
| 228,837
| py
| Python
| LeetCode/daily/2021-05-12.py
| Muzque/Leetcode
| d06365792c9ef48e0a290da00ba5e71f212554d5
| ["MIT"] | 1
| 2021-05-11T09:52:38.000Z
| 2021-05-11T09:52:38.000Z
| LeetCode/daily/2021-05-12.py
| Muzque/Leetcode
| d06365792c9ef48e0a290da00ba5e71f212554d5
| ["MIT"] | null | null | null | LeetCode/daily/2021-05-12.py
| Muzque/Leetcode
| d06365792c9ef48e0a290da00ba5e71f212554d5
| ["MIT"] | 1
| 2021-05-05T04:13:17.000Z
| 2021-05-05T04:13:17.000Z
|
"""
Range Sum Query 2D - Immutable
https://leetcode.com/explore/challenge/card/may-leetcoding-challenge-2021/599/week-2-may-8th-may-14th/3740/
"""
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
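# A minimal reference sketch of the standard O(1)-per-query approach (added
# here for illustration; the solution the testcases below actually exercise
# may differ): precompute a (rows+1) x (cols+1) prefix-sum table so that any
# rectangle sum reduces to four lookups via inclusion-exclusion.
class NumMatrix:
    def __init__(self, matrix):
        rows, cols = len(matrix), len(matrix[0])
        # pre[r][c] holds the sum of the sub-matrix matrix[0:r][0:c].
        self.pre = [[0] * (cols + 1) for _ in range(rows + 1)]
        for r in range(rows):
            for c in range(cols):
                self.pre[r + 1][c + 1] = (matrix[r][c] + self.pre[r][c + 1] +
                                          self.pre[r + 1][c] - self.pre[r][c])
    def sumRegion(self, row1, col1, row2, col2):
        # Inclusion-exclusion over the four prefix corners (inclusive bounds).
        return (self.pre[row2 + 1][col2 + 1] - self.pre[row1][col2 + 1] -
                self.pre[row2 + 1][col1] + self.pre[row1][col1])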
testcases = {
'1': (
[
["NumMatrix", "sumRegion", "sumRegion", "sumRegion"],
[
[[[3, 0, 1, 4, 2], [5, 6, 3, 2, 1], [1, 2, 0, 1, 5], [4, 1, 0, 1, 7], [1, 0, 3, 0, 5]]],
[2, 1, 4, 3],
[1, 1, 2, 2],
[1, 2, 2, 4]
]
],
[None, 8, 11, 12]
),
'2': (
[
["NumMatrix", "sumRegion", "sumRegion", "sumRegion", "sumRegion", "sumRegion"],
[
[[[3, 0, 1, 4, 2], [5, 6, 3, 2, 1], [1, 2, 0, 1, 5], [4, 1, 0, 1, 7], [1, 0, 3, 0, 5]]],
[2, 1, 4, 3],
[1, 1, 2, 2],
[1, 2, 2, 4],
[0, 0, 0, 0],
[0, 0, 4, 4],
]
],
[None, 8, 11, 12, 3, 58]
),
'8': (
[
[
"NumMatrix","sumRegion","sumRegion","sumRegion","sumRegion",
"sumRegion","sumRegion","sumRegion","sumRegion","sumRegion",
"sumRegion","sumRegion","sumRegion","sumRegion","sumRegion",
"sumRegion","sumRegion","sumRegion","sumRegion","sumRegion",
"sumRegion","sumRegion","sumRegion","sumRegion","sumRegion",
"sumRegion","sumRegion","sumRegion","sumRegion","sumRegion",
"sumRegion"
],
[
[[
[-5208,1041,-93779,-64152,17850,29055,-63731,-23568,41170,58457,-39616,55683,-51662,-75015,21726],
[4535,-72412,86878,-60825,67088,48794,-23471,-22403,58200,-31153,-94668,-27274,-11003,33894,-66125],
[-9538,-33861,54822,42636,48430,-56030,-33348,-30617,5219,56501,-95879,-73537,-18157,-72815,-40977],
[15602,40115,-32475,99011,47251,84035,83793,-74389,-99042,65460,11671,-95294,68311,47893,71866],
[69607,57288,55022,36610,-75113,31344,34319,-13381,-74800,-71904,-15625,-5398,-29689,-68805,-41994],
[-32276,95017,-96452,-47311,13238,46324,95358,13247,-30930,5815,-36748,-25712,-83982,29391,-73922],
[-29140,-70403,-3168,12219,-4473,-10013,-85502,87222,-44858,66506,-99821,-16992,-80758,59210,87145],
[-9557,67725,-27359,-28647,46781,-67948,-28154,-3498,91489,-3887,-96422,6568,42380,73264,-55406],
[40555,70153,-51490,-14237,9684,-54000,-8443,-32063,-96157,-70083,-7050,56221,93013,-1157,-45593],
[-28686,-54296,628,11189,18227,-64455,-10528,-69244,94796,-39806,69194,45024,-14417,-51291,6387],
[-28485,36898,97259,-83875,83650,-36715,80692,-55055,40025,-69379,-1548,-13045,23318,79349,-42774],
[82645,17721,84052,-35036,-751,90269,-77187,51972,-90217,-5956,-34552,95560,40436,51650,72778],
[-970,77788,10423,-1406,-90844,6732,-60197,59393,-82111,33737,-4731,-52679,-12011,69741,-91931]]
],
[3,2,12,6],
[11,10,11,12],
[7,7,7,10],
[7,10,10,13],
[2,11,5,12],
[10,8,10,12],
[12,7,12,10],
[1,14,9,14],
[11,11,11,13],
[7,7,9,10],
[12,8,12,12],
[1,4,6,11],
[0,9,9,13],
[9,6,9,13],
[10,14,11,14],
[4,9,7,14],
[5,13,7,14],
[12,0,12,14],
[9,14,11,14],
[2,8,10,13],
[3,5,12,8],
[5,3,11,10],
[1,14,11,14],
[8,2,11,6],
[3,13,12,14],
[4,9,11,12],
[7,1,9,2],
[0,0,8,14],
[11,8,12,10],
[1,1,10,13]
],
],
[None,82331,101444,-12318,303401,-263458,-20629,6288,-158619,187646,-162731,-117795,-398560,-561164,23728,30004,-436786,119682,-139066,36391,-474370,-277877,-516652,-128615,38933,175801,-278739,5361,-702643,-183830,-279081]
),
'10': (
[
["NumMatrix","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","s
umRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumR
egion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegi
on","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion"
,"sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","s
umRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumR
egion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegi
on","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion"
,"sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","s
umRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumR
egion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegi
on","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion"
,"sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","s
umRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumR
egion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion","sumRegion"],
[[[[-2,-8,9,9,-8,-8,5,7,-8,9,-4,0,-1,-4,5,7,-6,5,-9,-5,-4,-6,-4,-7,5,-7,5,-5,-2,4,6,-9,2,8,8,7,-1,-8,7,-1,-7,-3,8,-6,-8,1,-5,6,4,-2,5,6,-7,7,1,-5,1,1,9,-7,4,-5,1,3,-9,8,5,-3,0,-3,4,-4,-3,-5,-4,-1,2,3,-5,-5,-8,3,7,-4,8,-9,-6,-5,1,-7,-1,-1,-3,6,-1,2,9,-5,3,-6,2,2,9,-9,-1,6,-4,-7,1,-7,-2,8,-2,-5,1,-2,-3,3,-5,8,2,9,-2,9,6,-8,2,5,-4,-2,-4,3,8,4,6,4,-2,-7,2,4,-2,4,8,8,1,7,7,5,4,7,7,5,-7,8,-2,-9,-5,5,-9,8,2,8,-4,-7,-4,9,4,-5,-3,6,-3,1,0,-7,3,-3,-7],[-7,-4,6,6,3,-9,6,4,-8,-4,6,3,2,-8,-7,3,-9,6,1,-7,5,-9,9,-7,3,4,9,-8,-4,4,-5,-1,1,3,2,0,0,9,-9,-9,3,-4,2,-9,2,4,1,-2,-4,-9,0,-6,-3,1,5,-8,0,-4,-4,-3,6,5,6,4,-9,5,1,3,-7,-7,8,-6,5,7,9,-4,-9,-7,-7,9,-1,2,4,0,-8,2,4,-8,2,-3,-4,-7,8,7,5,7,-5,-5,-6,4,2,-7,9,-7,3,3,-7,7,-3,6,-1,3,-4,0,5,-4,4,3,-4,7,-9,-8,-3,-2,8,9,5,-2,8,0,7,4,2,1,5,8,6,2,-1,-9,8,-3,3,0,-3,-7,-1,8,-9,-6,0,2,6,6,7,-2,0,1,-1,-9,-8,0,-2,-9,0,3,2,-2,7,-1,-9,-6,-6,7,-8,6,5],[-9,6,3,-9,-8,2,9,5,-1,-8,7,-6,8,-9,-2,-8,4,-1,6,-3,2,7,4,-7,-9,-4,1,-1,6,-4,-9,-7,9,-7,8,8,9,-4,8,-9,1,9,-8,5,1,7,7,6,-8,-2,-2,0,-3,-4,-2,-2,-4,2,-8,1,-2,-3,-9,4,0,7,6,5,0,4,-4,-1,8,6,8,-5,6,-9,-9,-7,-9,3,-5,0,1,-2,7,8,5,6,0,6,6,-1,9,-5,5,-9,9,3,-7,9,-4,5,-8,8,3,4,4,-1,7,-6,-8,7,-6,5,-9,-7,-1,7,-1,-6,3,-9,0,6,4,6,7,4,1,6,-5,7,-2,4,8,-5,4,-9,-1,-4,8,2,-3,0,9,0,4,-4,-9,0,-4,-7,2,0,3,2,-6,7,2,6,-6,-9,5,-3,-1,3,3,5,-8,-8,8,8,9,2,-5],[7,7,8,-7,5,1,-5,4,-8,-8,-8,7,-8,0,-1,-1,6,5,4,8,-1,-5,3,0,-8,4,-2,3,8,8,0,0,7,5,4,-8,-3,3,7,9,2,-1,-6,8,1,-1,1,-1,3,9,-6,-6,-3,-9,1,6,-7,-1,-8,-4,-3,5,0,5,-8,8,6,-4,-6,-6,-8,9,4,9,-9,-2,8,-4,8,7,-5,-5,8,5,-8,-9,9,3,-3,0,-1,-5,-8,3,3,-8,4,-6,0,-5,-8,-8,5,-1,-4,-4,9,7,-9,-7,8,-5,5,-6,-7,8,2,6,2,-7,-6,2,1,7,4,-2,4,5,-3,7,-8,3,7,2,4,0,8,8,-3,-4,-9,-9,1,8,-3,-1,2,8,7,-3,7,4,-4,4,-4,-8,-6,-2,4,-1,-8,2,-2,-4,2,1,4,7,-5,5,-3,2,-4,7,4,-2,7],[-3,3,4,-9,6,9,0,-3,-4,-4,0,-6,-7,3,-9,8,6,-1,-5,8,-9,-7,7,2,3,-7,-4,-9,0,5,2,5,-7,7,7,-5,9,-3,3,1,-9,-3,-3,7,-1,6,-5,0,4,9,-2,-2,-9,8,3,4,-6,-9,-3,1,8,-3,-9,0,-9,0,-5,-3,9,-8,7,-5,3,8,2,9,-6,-6,-1,-7,6,9,-7,0,9,1,-8,-6,-2,9,3,2,9,-8,3,7,-2,-5,-1,5,0,8,2,-6,1,0,-1,9,7,-5,5,5,-6,-1,1,4,-4,-7,-9,6,-7,-5,9,2,2,-9,3,-3,6,-2,8,0,0,-7,2,5,7,-4,3,1,-8,3,4,-2,-6,-4,1,2,-1,4,-9,6,-5,-4,3,9,6,3,6,-8,0,-2,-9,9,-5,0,6,-9,5,-1,3,1,9,2,-6,1,-7],[0,-4,3,-5,-5,-3,-9,1,5,6,3,4,-5,6,-8,5,-1,-3,-9,2,8,-1,9,5,8,8,-8,-6,-5,6,-5,5,7,-8,0,-6,3,-4,6,4,7,2,-2,6,-9,-6,7,-1,-2,9,-5,-4,-2,7,-8,-7,8,-9,-4,-2,4,8,3,-5,-8,-5,-9,0,-3,2,-2,3,-6,-6,5,-9,3,-8,2,6,7,0,9,-8,-1,7,9,6,-8,5,-8,9,-5,0,6,-6,2,7,9,2,-3,-4,-6,-2,2,-1,-6,0,-8,-6,-3,3,6,4,1,-6,3,-2,7,8,-6,-7,-2,-1,-4,-2,-7,-4,4,-2,7,-8,2,-8,0,4,1,-9,3,7,-9,-5,-8,-2,-5,5,-6,1,-2,-4,-7,-4,-1,5,3,-5,-7,5,2,1,-6,-9,-2,4,0,3,-3,8,-3,-2,-7,4,5,-3,5,8,-4],[2,0,8,4,0,1,-9,6,3,-4,-7,-8,-4,2,-1,2,-7,1,-7,-8,1,-3,-7,-9,5,-2,1,-9,5,1,-1,4,-9,-5,-5,3,2,-5,-5,7,-8,-3,5,-6,7,4,-5,-5,-9,-9,-3,9,5,-9,-6,-8,0,-5,-4,9,4,-5,9,-3,-3,-4,-2,5,-6,9,-4,-4,-4,-4,5,-5,9,4,-9,6,-4,-9,-3,-5,4,0,2,-1,3,-3,-7,-8,7,4,-7,0,4,-5,9,-7,7,-7,-7,-6,-7,-5,-8,1,-4,-3,7,9,1,-7,-5,-2,-2,5,0,1,-7,-4,-3,-3,-4,-3,1,6,1,-2,-4,-1,-3,0,-3,-9,4,-7,-5,-5,-4,7,-8,-4,-6,1,8,8,-4,2,-1,0,2,-7,-9,5,9,-5,-6,6,4,4,0,-3,-2,-1,8,3,-5,4,5,9,-4,8,0,7,-6],[8,-3,0,5,-6,9,3,-8,-9,-6,9,-5,6,-7,5,4,1,-8,6,7,-2,-1,8,-8,6,6,-6,0,-3,-6,-1,8,5,-2,3,-4,7,-6,-8,-3,3,4,1,-5,2,-8,-2,-8,9,-8,-3,-3,0,-5,-8,5,-3,-6,7,-7,-1,5,-3,5,0,7,8,1,5,-9,4,-7,-2,-4,7,6,-7,-7,2,9,0,6,-3,8,7,-7,6,8,9,-8,-4,6,-1,-3,2,-6,-7,-4,0,9,5,8,-5,-3,7,4,-1,-3,9,2,-5,-4,-9,-9,-3,6,3,-7,-4,-4,0,-6,7,-9,0,-2,4,4,-1,3,3,-9,1,-7,1,-1,1,1,1,-5,7,0,4,4,-4,4,9,2,-7,4,2,-7,-6,-8,-7,7,6,0,0,-8,8,8,0,-5,-3,4,-3,2,3,-6,6,3,4,-9,-2,-3,-5],[-8,3,-7,1,-8,-9,7,9,3,-4,
0,0,-3,0,9,-4,8,8,-5,-3,-8,-8,7,5,3,-6,-2,1,-6,8,1,9,-2,-9,-7,4,6,5,-7,-3,5,-8,-7,8,-6,1,-5,1,-8,4,-1,-7,5,3,5,-8,-1,-9,3,-7,-4,8,-8,2,-3,9,2,-4,-2,8,-4,6,1,5,-3,3,-9,6,6,2,-3,9,-7,0,-4,3,-2,2,-2,-4,4,-1,-5,-3,-1,-4,3,-7,9,-4,8,7,9,-2,-6,-4,2,-7,-6,-3,-7,1,0,-5,0,8,4,9,-2,1,4,-9,5,-7,-7,-9,-7,0,9,7,2,-2,-8,-4,2,7,-2,-8,0,4,-4,6,-5,8,-7,8,-3,-8,-9,6,3,-1,-7,-1,6,-4,3,-8,-2,1,4,0,-8,-2,-5,-5,-3,1,-3,9,-6,9,-2,-8,-4,-5,4],[2,-4,-7,2,-7,-5,-3,8,8,7,5,-2,4,0,-3,5,5,5,0,4,-4,-9,-3,3,-1,-4,-8,-4,-9,8,-1,3,-3,9,7,0,-5,3,2,-5,-8,-9,-2,-3,-8,-7,1,-6,3,7,-7,0,1,1,-3,-2,0,-8,7,9,-9,-7,-3,9,-2,-4,-3,-8,4,3,-2,-6,5,3,2,-5,-2,-9,-2,4,-9,-1,6,-3,4,-3,-4,-7,-9,-6,8,-8,-9,-5,3,-6,-5,-3,-8,-8,-7,4,2,8,1,-2,-8,-8,-2,3,-2,6,8,-8,7,0,4,2,1,3,-7,3,9,-5,6,0,-5,-1,-4,7,-1,5,-1,7,-6,0,-6,7,1,-8,-2,8,2,9,5,-9,9,8,8,-8,7,6,-5,-5,-2,2,-9,-7,-5,4,-8,9,3,-7,2,2,-3,-7,-1,0,1,-1,-5,3,7,-6,-6],[8,-5,-5,-7,8,-8,-7,7,-4,-7,-3,6,2,5,-4,-8,6,6,1,-4,-1,0,3,3,-9,9,9,-5,-8,2,-6,-4,-3,4,9,-8,8,-6,3,-2,5,5,-3,-2,-6,-6,2,-2,7,0,-2,9,8,-8,1,0,-4,0,5,0,-7,4,-2,-7,2,-3,7,-4,-9,1,2,-9,1,-2,-6,3,1,8,5,8,-5,2,5,6,-1,9,0,6,-4,0,-9,4,-2,-9,1,-2,-6,-3,-1,6,9,-5,9,2,5,-3,-5,8,0,-7,8,2,7,5,9,-2,-3,9,-1,1,-5,7,0,0,-9,-2,1,1,-7,2,1,-5,-9,-4,3,3,-7,2,6,-7,-6,6,-9,-4,0,-2,-6,-3,2,-3,0,-3,9,-9,-4,-3,-4,-3,1,-9,-7,-7,3,-9,-5,8,-9,-4,3,-1,-2,-2,9,1,2,-9,-1],[-9,0,0,-1,-4,-5,-2,5,7,9,0,8,0,7,6,-9,5,-6,2,4,-7,-8,-6,-5,-4,-1,-5,1,4,4,-7,9,3,3,7,4,-4,7,-8,3,1,-4,-1,4,-2,5,-4,1,-4,4,4,9,-6,-3,1,-8,8,7,8,2,-6,-2,7,-9,-8,-8,8,-1,-8,2,-5,-6,-5,6,-6,-1,9,-7,1,-3,-4,-1,4,-3,-6,6,-6,-7,5,5,-9,-7,8,2,-7,-2,8,-5,0,-1,8,-7,-4,-5,2,8,-3,1,1,-5,6,-2,-5,-4,-6,3,-2,4,-6,2,-7,5,-6,1,-3,5,5,-9,-2,-4,-8,-9,7,1,4,0,-4,3,-8,4,-4,-5,1,2,5,8,8,-3,8,-3,-1,7,-6,-7,-9,-3,-2,9,7,-5,7,-4,-9,3,-4,4,0,9,0,2,-1,3,-7,-8,-7,-8,-8],[9,2,-5,-4,8,-5,-4,-2,7,0,-6,4,1,-1,7,-9,2,-7,6,0,-6,-2,1,4,7,-6,1,1,7,1,-9,3,8,4,1,7,5,-3,2,3,-4,-5,-2,9,9,-8,0,-3,7,-6,1,5,8,-2,-4,7,-8,-5,-9,-3,-7,-9,7,-9,-9,-9,7,-7,-1,1,1,0,9,1,3,5,5,-3,2,4,-2,-9,-6,0,3,-2,-3,-3,1,-5,1,-1,7,7,6,5,-8,-1,-2,0,-7,-7,-1,-2,3,-6,-3,-3,5,-3,-7,-4,5,7,2,4,-4,9,1,-8,5,1,-4,-4,-5,3,5,9,7,-6,4,7,7,-8,-5,6,-6,8,-5,1,6,-1,9,-9,-5,3,9,1,7,5,5,3,2,-8,-3,-4,7,-6,-2,6,7,4,-7,2,8,3,8,-9,-7,3,-4,6,7,1,8,3,3],[4,-8,-8,-8,5,-8,1,-8,3,-4,4,-8,5,2,-5,-6,-5,-5,0,-2,-2,3,9,-7,-5,-8,-2,-6,6,-8,0,-7,9,-6,1,-1,1,6,-2,8,-6,-3,7,0,3,-4,8,-1,-5,3,-8,9,-6,3,-3,-2,2,8,-8,0,4,8,4,3,8,-8,-5,-6,-9,-4,2,9,5,-2,7,4,6,-9,-9,-1,5,-2,3,0,-9,4,-5,6,-8,-5,9,-9,1,-1,-4,1,-6,2,-7,-3,9,2,9,-2,7,-7,6,-8,1,9,1,7,0,9,-4,-1,3,-9,-7,8,-5,8,3,1,5,2,9,5,9,-4,-5,4,0,-8,-9,-7,-2,-9,4,-5,-4,-3,0,-8,-8,7,9,7,-5,2,-3,-6,4,4,2,3,3,4,1,0,-9,2,-3,-3,-4,8,-4,-4,5,-8,9,4,9,-7,-7,9,1],[4,-9,5,-5,-4,-1,-7,-6,8,-2,2,3,-8,3,6,-4,6,9,-3,9,-7,-7,-8,3,7,8,0,4,-3,3,9,7,-1,-9,7,6,-7,-2,9,6,-1,-4,-5,0,0,4,-9,3,-5,1,-8,-2,5,-1,2,2,7,6,2,-8,0,-9,8,6,-4,3,8,-1,-9,8,-5,5,-8,-1,-6,8,3,0,-1,8,6,0,6,-5,5,-2,1,1,3,0,-3,0,-4,-4,-6,9,-2,-2,1,-4,3,-6,-8,-5,-3,5,1,0,2,4,6,6,0,1,4,1,-9,-9,4,1,4,-2,-3,4,0,-5,9,-8,6,5,-5,8,0,-1,-5,-1,6,-8,3,1,7,4,-7,1,5,8,3,-1,-8,-2,1,2,8,6,-8,-3,1,4,5,-6,-5,-6,3,-3,8,-1,1,-6,-1,-5,-5,9,-4,-8,1,-9,7],[9,-8,-7,-6,-6,-1,3,1,-2,2,0,-9,8,-7,-3,-8,7,2,-4,1,4,0,7,1,-7,-7,5,9,4,5,-1,-2,0,6,-6,-9,1,-9,0,0,2,-1,4,9,5,-7,5,-1,-2,8,1,4,-5,3,8,0,3,8,-7,3,2,2,-3,7,-3,-8,-3,-1,-9,-4,-4,-7,-9,5,-1,-1,0,7,-3,-1,-3,-5,3,5,-1,-1,-5,1,3,7,-9,4,-9,8,0,3,0,8,-9,-9,-1,-4,7,-2,7,-2,1,5,-7,-8,7,4,6,-2,7,5,4,8,0,8,-2,4,3,-9,1,1,-9,2,-6,-9,-8,-1,0,-1,1,6,4,9,-6,9,3,-5,-2,6,-9,8,-1,3,6,9,4,2,-2,-6,-5,-3,0,-2,-4,0,-5,1,-7,-1,9,7,-3,3,5,3,-2,-5,-7,1,-5,6,9],[8,-9,-2,9,-5,5,3,2,7,4,-2,5,-3,6,-6,9,-8,-8
,9,-5,-4,-4,6,-1,1,-6,8,6,-1,7,-3,-2,4,-8,-2,9,-5,7,-6,7,6,-1,-6,6,1,-7,-5,-6,-6,-7,-4,-3,-8,-8,4,-2,-3,-4,-4,-4,1,-7,7,-9,-1,-4,6,-1,5,-5,8,-4,-6,7,-7,9,-9,-3,5,-5,0,2,-5,8,1,3,2,5,8,-3,-5,8,6,3,5,4,-5,0,-6,-4,7,3,1,-3,5,9,9,9,-4,4,-8,-1,5,-9,-3,-8,-6,-5,4,-3,3,-5,9,-9,-6,1,-8,6,1,-7,9,6,1,4,4,-7,7,-9,0,2,4,-1,-2,-5,5,6,5,5,5,-6,1,3,6,-3,-5,2,-6,6,-6,7,1,-6,-9,1,-8,-9,-2,-5,0,6,9,6,-3,-3,8,0,-9],[-2,4,-7,-1,-1,-9,-6,8,-6,7,-8,9,2,-6,6,7,0,-3,7,-3,-1,6,-7,7,3,-9,-2,1,-5,7,4,5,5,-1,-6,7,5,-4,1,-8,-7,-9,2,-5,-8,2,7,6,8,5,6,1,5,5,-6,-9,0,-1,4,-2,4,-3,6,6,0,-2,9,3,-4,3,-9,0,8,0,-3,8,-6,-6,-4,-3,-3,-3,-6,-3,-8,8,0,0,-2,-2,-6,-9,6,8,5,-6,5,7,-5,0,0,-1,6,0,6,4,8,-8,1,-1,-7,9,-9,-6,-7,-5,-3,3,-8,8,2,9,-7,-9,-2,-3,3,4,2,-9,1,5,-1,4,0,5,-7,-5,8,-6,2,3,-2,6,8,-8,-7,-6,9,8,-9,-3,-1,-6,-9,-5,3,-3,-1,6,8,-1,3,8,-8,-5,-5,8,-5,3,-6,6,0,8,9,-3,7],[-7,6,-9,-1,-7,-9,0,-5,-9,-3,0,2,3,7,-1,8,-3,6,2,9,3,9,8,0,9,9,-9,-5,9,6,-5,4,2,-4,-7,-5,-9,2,5,-8,5,-6,8,2,3,-9,8,-5,-4,-4,-2,-5,3,-4,3,3,-2,-7,5,7,-6,-3,-4,0,7,-3,5,-3,-6,2,-2,5,7,2,8,1,7,6,5,5,-5,-1,4,8,2,-2,-5,-8,-4,2,1,-8,-9,2,8,-5,1,8,-4,2,1,0,-6,-7,-6,-3,4,1,3,5,-3,9,4,-7,-6,-9,8,1,-2,8,-3,-9,-7,-6,-8,3,7,4,-3,-6,-5,4,9,9,8,8,-8,-8,-9,-4,-2,7,7,-9,-4,-5,4,8,-4,-5,-4,2,5,-5,4,8,0,-2,1,3,-7,-6,-1,-1,-9,-1,-1,2,-2,-3,-8,-4,-4,4,2,-6,-2],[4,-9,8,-8,2,-5,9,0,5,-1,-8,1,-5,-9,1,-3,-8,-8,-2,-5,1,-1,-9,1,3,-8,0,0,-5,-4,-3,-2,2,2,8,0,-8,-4,5,7,-2,-9,-8,-5,2,5,1,5,3,-7,9,9,8,5,1,2,7,-9,-1,9,8,0,-3,-1,9,1,-4,-6,7,-1,5,-1,4,9,7,-6,7,-8,5,0,-8,-2,0,-9,3,-8,7,-3,2,4,4,4,9,7,2,-4,-2,7,2,5,-9,2,-4,-4,8,3,-8,8,6,-5,7,0,0,6,-3,-7,-5,-1,1,-2,9,7,-2,9,1,-3,-5,-2,-9,-6,-6,-1,-7,0,0,2,-3,-5,5,-9,-8,-3,-6,0,0,0,7,-1,-2,4,-1,-2,7,5,4,9,9,9,-1,-3,8,-5,6,5,-1,0,0,3,2,-3,-8,-2,1,-1,-6,-3,5],[-6,0,1,-9,-8,9,-6,-1,-4,7,-1,-7,0,-4,3,3,-2,4,4,1,-5,0,1,-9,-3,-4,5,3,-6,-6,1,6,7,-6,8,5,-3,5,-5,5,7,7,-2,8,-2,3,-7,-2,4,3,4,-7,-8,-7,6,-4,-7,-9,-9,3,-6,8,-5,4,-8,-8,8,-1,-5,2,-2,-5,5,2,-2,-4,-1,8,-7,-4,9,-5,-1,-1,7,-9,0,0,-3,-5,1,3,-9,-1,-7,-6,-2,7,3,-6,6,5,9,-3,-2,-3,8,0,-8,-5,5,0,-1,-9,-8,7,0,2,3,1,-6,4,1,-8,7,6,8,-2,-1,-5,-2,8,-4,0,8,1,9,-9,6,8,2,5,0,-6,-1,4,8,-5,9,-7,6,-4,-6,-7,-9,-3,-9,4,-1,1,-4,-8,-3,-9,5,-4,4,-7,-2,-9,-1,-7,-4,0,3,-3,0],[0,7,8,9,6,-3,-1,7,1,-7,-2,8,4,5,-9,4,-3,8,3,0,-8,-8,5,4,6,-9,-4,4,-5,-9,0,6,-8,4,-3,-2,-9,-2,-3,-9,5,2,-7,-4,-8,9,9,7,-6,9,-9,2,-5,-5,6,9,5,-8,-4,4,-1,-7,9,2,-8,1,-6,-7,-5,0,9,5,0,-2,-3,-1,7,-3,3,-2,-8,4,5,1,-6,-8,-6,-6,-7,3,7,7,3,0,-4,-3,-2,-5,7,2,7,-3,2,-5,8,-8,-8,5,9,4,-5,4,-7,-6,-5,2,-7,0,3,-5,-1,6,-2,-5,4,2,2,-4,3,2,-3,-7,-6,7,7,-5,-7,4,-7,2,4,-7,-8,8,7,-5,-2,0,0,7,3,-8,-9,8,6,7,-8,5,-5,1,-2,2,4,6,-8,-9,8,-2,-1,-9,6,-3,3,-6,9,2,3],[-4,-2,6,-8,-5,-3,-7,0,4,4,-3,-3,-9,3,-9,0,7,-5,-8,5,7,0,2,-7,-8,-9,1,6,-4,6,-1,-2,1,-8,-1,-6,7,0,9,8,-3,-8,1,-2,2,1,3,-7,-6,-2,-3,5,6,4,2,3,-8,6,-7,-3,-7,-9,-8,4,3,-2,2,-8,-2,7,-3,0,-5,-5,1,6,-8,3,8,2,1,-6,-6,-4,-4,-2,8,1,5,9,8,5,-4,3,-7,3,-7,-4,9,0,-5,9,5,4,-4,1,9,-6,-7,0,-2,6,-8,7,-5,-2,-5,-4,9,2,-8,1,-3,4,9,5,-8,-5,1,2,5,9,2,3,-3,-5,-5,-7,2,6,-2,-7,7,4,-2,6,-4,-3,-3,-3,-6,-8,4,3,3,4,2,9,-5,2,-7,-1,-3,8,-9,2,5,1,-2,3,7,-8,-3,-3,6,2,-8],[2,9,-8,-4,5,-4,-1,8,7,4,-6,-9,-1,3,5,0,1,-1,8,0,-1,-4,2,-9,-7,5,3,7,0,9,-5,-9,-7,-4,0,-2,-6,-3,-1,4,1,-8,-4,4,9,-1,9,2,9,3,9,-9,-9,4,-1,-8,-5,0,3,3,4,5,2,4,7,1,0,-9,-2,-2,2,4,9,-4,-2,-9,5,2,7,-5,5,1,-7,-7,-5,5,-9,-3,-5,-3,0,-9,6,-1,-9,-3,7,-6,1,6,1,6,-7,8,-2,-1,5,2,-7,8,-2,1,-7,-9,7,4,7,9,3,-4,3,9,7,2,3,7,9,8,-9,2,6,-6,-8,1,0,-7,8,-6,9,-9,1,-9,7,-2,8,4,-9,0,-2,-6,5,-2,-8,5,3,-5,4,7,5,-4,-9,3,-5,-1,4,1,9,0,5,-2,1,-8,8,2,7,0,5],[2,-7,-7,8,-4,1,-7,-6,-3,-5,-8,-1,-2,6,2,9,-9,-1,1,8,-8,9,0,-7,-4
,-8,9,-7,-4,4,-1,1,9,-9,5,-7,-4,-7,8,2,2,-7,9,3,-1,4,-5,-8,-4,7,7,-8,-8,3,-2,-9,6,-1,8,6,-4,1,-4,2,-9,-6,0,-5,-9,-1,5,3,2,-4,-2,-3,2,8,4,-9,-4,-8,-4,-6,4,2,5,-5,-5,8,-8,7,-5,-6,9,-3,-7,1,7,-5,9,9,3,6,-4,7,1,5,-5,1,-6,6,-6,4,1,-2,6,-3,3,-7,9,0,2,-9,-8,0,-6,-7,2,-1,-5,1,4,-4,-8,9,2,8,1,-2,0,-8,-8,-1,-3,-6,-6,4,-6,-7,-4,-5,-8,4,4,1,-1,7,3,-3,0,4,1,-5,2,2,-8,-1,0,7,1,-7,8,5,-9,-8,4],[-3,-1,8,-9,-5,-8,8,0,1,1,3,4,1,6,2,-1,9,-1,7,-4,-7,-2,8,8,9,4,-3,-6,0,-6,5,4,8,-4,-3,-6,0,9,-6,-5,7,-5,-3,8,7,7,-7,4,3,-6,9,-4,1,7,-1,-9,-2,-9,0,4,-9,5,-3,-6,9,6,7,-3,4,-1,7,-5,-1,-5,-1,-3,-8,5,9,-2,-7,7,3,9,6,-7,0,9,-9,-8,-3,-1,-8,-5,-7,-1,4,-4,-9,8,9,-1,-3,-9,8,4,-9,2,3,7,-6,4,9,6,-4,8,-4,0,-3,-5,-8,0,0,3,2,6,-6,1,-9,-5,-8,9,8,-6,2,0,-6,-8,-4,8,-8,-3,2,-5,-9,2,6,-8,-5,1,-6,6,2,3,-3,-4,4,8,-9,-6,4,3,-8,2,-1,3,-7,-7,-9,-1,6,1,7,-5,2,-3,-7],[9,7,-6,5,0,8,3,9,-8,-4,2,-5,7,5,-9,2,6,7,-3,-3,1,9,9,5,2,2,6,-6,2,7,5,-9,6,-1,-4,8,8,0,-1,7,6,2,-1,-8,2,-4,9,7,-3,8,-9,-5,3,8,-9,5,0,-5,-6,-3,9,-1,1,-4,-3,-4,-4,1,2,1,5,-6,-3,-1,-6,5,-2,-1,3,7,-9,2,1,-8,5,-2,-4,7,1,4,6,9,4,9,-7,8,-4,-6,-7,-6,-9,0,9,-2,8,3,9,-6,-8,3,-4,-4,5,4,-6,3,2,-8,-5,3,1,-6,2,-8,-8,1,-3,-1,4,4,-4,9,-5,-1,-1,-1,-6,-5,-9,-7,-6,1,7,2,-4,8,5,-1,0,0,6,8,7,1,-6,0,-8,7,-9,1,-6,3,-1,3,0,8,-2,-2,6,-3,-7,-4,3,3,3,6,-7],[4,1,2,9,2,-3,9,-9,-6,4,1,7,-8,-8,-7,5,8,0,5,9,-2,-9,9,2,-6,-1,-1,3,2,1,-3,-8,1,0,-8,-4,-8,2,2,2,-6,-1,4,-1,4,2,-9,-4,5,-6,-9,-2,6,-3,4,-8,7,5,8,5,-5,4,0,9,4,4,-9,-9,6,9,-3,-1,7,-8,0,-2,-9,7,-5,-5,-3,2,-9,0,-7,2,-1,5,4,-2,2,6,8,-3,8,5,8,-4,3,7,-2,4,-6,1,-6,4,-2,-1,7,2,7,4,8,0,-3,4,9,9,-7,-5,0,2,-3,-5,1,-7,5,-2,-3,-9,7,-6,7,-2,4,7,-3,-9,-2,8,2,-3,4,6,-2,-2,-8,7,9,-6,-3,9,-8,-6,-4,-1,-2,-2,-6,0,-1,8,-8,-8,5,2,-2,-9,4,-1,-1,4,-5,-8,0,5,5],[-4,5,-5,-7,-1,0,8,-8,-9,3,-8,-2,7,4,-3,-8,8,-9,7,9,1,-8,7,7,-8,-4,-2,2,-3,9,-3,-5,6,-8,2,-8,-4,-1,-3,8,1,4,-1,-5,-9,9,-6,2,-8,-4,-7,9,-4,-7,-1,-7,-5,3,7,8,8,9,5,7,-3,1,-9,-1,-4,9,2,0,2,2,8,4,0,-2,7,-5,0,4,-6,8,9,2,1,2,1,-9,3,5,6,-2,-2,-3,-5,9,1,6,3,6,2,-8,-4,-2,-8,-6,2,6,0,-9,8,9,4,-6,-8,-7,4,2,-8,-9,-3,9,-5,-4,-6,-5,9,-5,-6,6,7,-1,-4,7,-7,-9,0,7,5,6,9,-6,2,9,4,7,-8,-4,9,-8,-7,8,-1,-7,5,4,-5,8,3,-7,-5,-6,0,-1,4,-4,-8,9,-7,1,4,2,-8,-5,7],[6,6,-6,-1,2,-6,9,2,9,6,6,-1,6,-6,9,0,4,2,9,-7,-5,2,-4,2,-9,1,-9,8,-4,9,2,-4,8,5,9,-7,-8,-5,-5,-1,-8,4,-8,-4,8,-8,0,-2,0,-5,6,5,0,9,-9,-4,-9,-5,1,1,5,9,6,-7,6,-3,-4,-8,6,-8,-9,-7,2,-3,-5,4,2,0,2,2,-7,-2,-5,2,-4,-5,-3,-5,-7,-8,-1,-9,-9,-3,9,-6,-9,-6,-3,-6,6,-6,-4,-7,-4,-3,-2,-1,-1,4,9,8,9,9,5,2,-4,-8,5,0,-6,-2,-2,-9,-3,-7,2,8,9,-7,6,1,4,-2,3,-8,4,8,1,-1,-7,4,6,-1,-8,4,-9,4,4,6,5,-9,6,1,4,6,2,-7,-3,-4,-8,3,-8,-3,8,4,-9,-7,-8,1,-7,-5,-7,5,-6,-2,9],[5,-2,-1,1,-9,-6,-8,2,-5,-2,-1,-1,-7,5,-6,-9,-6,0,7,9,6,-9,8,4,4,9,1,5,5,-9,-3,4,1,-9,7,8,-7,6,4,-1,-9,-8,9,8,4,-7,2,6,-3,1,7,-4,-6,3,-7,0,6,-8,7,-5,-1,8,9,-6,6,8,-7,4,-1,5,-9,-9,3,5,8,5,5,-6,1,-4,5,9,-4,1,-2,-4,-8,-5,6,6,6,2,5,-7,0,3,2,9,-5,-2,-1,-4,-5,2,4,3,-6,-9,8,7,-1,-2,9,-8,-2,6,9,0,-5,1,-3,0,-5,-3,6,5,7,-7,2,-2,-6,-8,1,-9,-3,2,-9,-5,-3,3,-7,-5,4,-7,0,-2,5,-6,-3,-8,4,4,2,-2,-6,-4,6,-2,-2,5,8,9,-1,8,7,-2,2,2,0,-4,5,1,-3,4,-5,-1,8],[0,-5,-4,5,-8,6,-9,3,-4,1,-3,0,-9,3,-8,6,0,-8,8,-7,-1,6,-7,7,-2,-3,2,-7,-2,0,-2,6,-8,-2,-9,2,2,-9,-1,-5,-5,8,-1,-3,-5,4,9,-9,6,-3,7,-9,-5,-9,-3,-3,-6,-7,-1,-5,-8,-3,9,4,2,-3,-1,7,4,-5,3,-1,8,1,-5,-7,-7,4,-4,-1,-8,6,9,-9,-2,-4,-2,7,5,-9,5,-3,-9,-6,-4,-3,7,4,8,7,6,-9,-4,5,7,-5,-7,-5,4,-2,2,3,-4,-7,0,-9,-2,3,8,-7,7,2,-3,-5,-2,-1,-4,2,6,-5,1,9,3,2,-9,9,-9,7,-9,4,-3,4,4,-7,-8,0,3,7,7,-4,2,-7,1,-5,6,-7,-3,4,-3,-1,3,9,-9,-3,-7,-5,5,-6,-9,-1,-6,2,-1,9,0,-7,-5],[2,3,0,2,-3,-8,6,7,6,-2,-7,-1,8,7,4,-8,-2,-6,-3,7,9,4,-4,-7,-4,-5
,8,-1,0,-3,5,-7,9,0,7,5,-4,-6,-7,-1,-7,8,-7,-7,2,-2,4,-1,-4,5,2,5,9,-5,0,-9,-4,-2,-1,-3,5,-1,-8,-6,8,-5,9,-3,-4,5,-8,8,-3,-1,3,-8,4,3,6,3,-8,-6,2,1,4,4,3,0,7,-8,-1,-4,-2,-1,6,-7,6,-6,0,0,-6,-5,-7,5,-9,-5,7,-8,7,-7,3,-2,2,4,6,-5,9,1,-4,-6,0,-7,0,3,-9,-4,-7,-5,-2,-5,-1,9,-1,-2,-2,0,8,-3,2,1,5,2,-8,7,3,-5,-6,1,5,-9,-6,7,-2,5,-4,4,-6,3,-8,3,-5,-6,5,2,3,8,-7,5,4,-9,-9,9,7,-8,4,7,9],[-3,-7,-6,9,-9,9,-2,6,3,-7,-8,-5,5,2,-1,0,2,9,7,-6,-5,9,5,-3,0,0,0,-4,-8,9,4,0,-7,5,-3,-7,-4,-2,4,-8,-9,-3,1,-7,1,8,-2,7,0,-6,9,0,-4,0,7,5,3,-5,4,3,3,6,7,6,5,-8,-6,-6,-9,2,-8,2,3,2,0,-4,1,-9,-9,-9,-4,-2,8,2,-2,-9,-1,6,-2,5,-1,-7,-6,3,-3,-6,4,-3,4,-8,-2,5,0,7,3,9,8,-9,6,8,6,-4,3,-8,-9,8,-1,-8,3,-6,-5,0,-5,4,-1,-8,-2,-1,-1,2,-1,-2,0,-5,-6,-5,2,4,4,-4,-6,-7,2,7,-7,-6,0,-3,4,-8,-9,-6,1,8,7,-5,7,2,-1,3,4,3,1,-6,3,-3,-8,-1,-9,-6,-1,-2,-7,4,-2,-3,-5],[7,-8,1,6,-6,-4,-7,7,-2,-7,3,-4,-9,8,9,-3,9,0,6,8,-4,1,5,-2,-2,0,8,-2,7,-1,4,-6,-6,-2,-4,1,-5,8,7,-1,-3,-8,9,-8,3,6,7,1,4,0,9,-3,7,5,-1,8,-1,-6,3,7,-4,-8,0,-5,6,-1,-3,9,0,-9,7,-2,-3,9,-6,0,-1,2,4,2,9,-7,-5,8,-7,2,-5,5,3,4,7,-5,9,2,2,-3,1,-1,-4,7,0,1,-3,9,5,-8,1,7,-1,5,-1,-2,4,8,8,6,5,-9,-7,4,4,5,6,0,6,5,2,-8,-7,3,4,3,9,7,-9,5,-5,1,4,-9,-9,1,-7,-8,4,-5,-9,-1,-6,0,4,-4,7,-9,2,2,3,9,2,2,9,1,-2,6,5,9,5,9,-9,5,1,8,-2,-4,9,-1,5],[4,9,3,-2,9,-1,8,2,-1,0,4,3,-1,0,5,-7,-1,2,9,1,9,5,-9,-6,-5,8,-8,-5,1,8,-2,-4,1,9,9,-1,-9,-1,-1,5,8,0,-8,8,-5,8,-7,-3,0,-3,-3,8,-8,1,2,-8,-1,-6,-2,-7,-6,-5,-1,1,6,4,1,9,1,6,-9,8,9,-2,9,5,6,-1,-8,-4,-4,6,8,0,-5,-5,7,3,6,-6,-3,4,-3,-1,3,1,-3,9,7,-4,8,-5,-7,-3,-5,-2,0,6,-3,-6,7,-5,-2,6,6,8,9,5,7,-3,2,0,-9,2,1,9,-3,1,4,-4,4,-6,-3,9,-6,-9,-8,-1,4,-8,-8,8,-8,-5,-3,5,0,8,1,-2,9,2,-5,1,1,8,-5,8,-2,8,0,3,5,-5,5,5,3,-6,0,-2,-1,4,-6,-3,2,5,-9],[3,1,6,9,3,-8,1,-7,-7,-8,-9,-8,3,4,-2,-2,6,-7,-1,-8,6,-8,-8,3,-8,3,5,-3,2,1,-3,-1,5,7,2,-8,-1,0,1,7,4,2,-8,4,-5,-5,2,9,-3,-3,6,-2,-7,5,1,-3,-1,-5,-9,-2,3,5,-1,0,-7,-6,-8,-2,4,-2,-2,-1,3,-8,4,5,-1,-8,6,7,-2,1,-5,8,-3,8,-1,-8,-2,-3,5,0,-7,3,-2,-1,3,4,-6,5,-2,-3,4,3,1,7,-5,3,4,-6,5,-1,7,-1,-9,-8,-4,9,-3,-4,-2,-7,6,4,-9,4,-8,-9,-3,-1,-4,2,8,6,-2,3,-1,2,-9,-8,4,0,-7,-8,-7,3,-2,-5,-9,-7,6,-5,-4,-7,5,-2,-1,7,3,5,2,-9,-7,3,-9,-5,3,-4,9,4,1,9,9,4,5,5,0],[-4,5,3,-4,-2,-3,1,1,5,-4,-6,-6,6,5,-9,-2,6,4,-2,-7,-9,3,9,-8,-5,-9,1,-5,-9,-8,-8,-7,7,8,2,-5,-8,5,5,9,-7,2,-6,-8,-6,9,-1,4,6,-4,-6,-2,-1,-4,8,6,3,5,-2,-7,8,3,4,9,3,-3,7,2,7,6,1,-1,9,0,4,-5,7,1,9,-2,9,-8,-1,-2,-9,-5,-8,8,9,4,-6,5,5,6,4,-1,-6,-9,-9,5,3,-2,3,0,-6,-5,9,0,1,9,8,3,-1,9,7,8,-6,-3,-3,2,4,-5,7,-6,0,-2,9,-2,7,-9,7,-8,-5,1,2,-8,-7,-4,1,7,-1,-1,-5,-3,6,-5,-8,-9,-5,9,-4,7,-7,7,-1,-2,-2,4,-2,-5,8,-8,-8,-3,3,1,-4,5,8,6,-7,0,-6,-2,4,6,-9],[6,0,7,9,2,-8,-8,3,7,3,1,1,-2,-6,-9,-3,-3,-5,-9,-6,4,4,-5,-1,8,2,2,-6,9,8,0,-6,7,8,-9,-1,-4,4,-6,-1,3,-1,-1,8,-1,-6,6,9,6,-3,-7,9,7,-4,-1,-6,3,0,-7,-6,1,8,8,2,-9,8,-5,-1,7,9,-3,1,-1,-3,9,-2,8,-9,6,2,2,8,-4,5,-7,0,3,8,-7,-3,-1,-9,1,-8,-8,-9,1,5,8,-5,-9,-2,-6,-3,6,-8,6,2,-2,-5,4,-6,4,1,3,7,3,-3,-4,-3,3,-6,-2,0,-2,-4,1,-9,-1,-7,2,-4,-9,6,-1,3,3,8,-7,-9,1,-2,8,5,-4,-2,-5,-3,1,-9,2,6,6,-4,-1,-9,-3,-9,0,3,4,-6,-2,3,-4,0,6,2,5,-8,1,-3,-5,-3,-1,1,4],[6,-1,8,-9,-2,4,-4,-5,9,-6,-4,8,-2,6,-2,-5,5,0,8,9,-2,8,-6,2,6,1,7,9,-3,-5,-4,2,-9,9,9,-7,-1,-4,8,-2,6,3,5,-2,6,-1,-4,-1,2,7,9,-8,0,-7,1,-8,3,9,2,6,3,6,-7,5,-7,-6,-2,-6,9,1,0,6,2,5,1,5,4,7,9,6,1,8,-5,-1,3,6,8,8,-6,5,-8,8,6,-3,-2,-9,-6,5,-7,7,8,1,1,-2,-6,-8,-2,8,-8,-3,-3,1,3,0,0,8,-4,-3,-9,5,-8,5,-1,1,-5,8,3,5,4,0,2,6,-8,3,4,-6,2,-9,5,4,-2,-5,-4,7,-3,8,-6,1,6,0,1,2,8,1,7,5,-3,2,-8,2,3,-7,-5,-6,1,4,-9,-8,1,8,2,-7,-4,3,-8,-1,7],[-7,2,3,-1,-2,9,4,-3,-3,-3,4,1,7,1,2,-8,-8,0,-3,-5,9,8,-6,2,-6,6,8,4,7,-4,9,-2,-1,-9,-7,-2,3,1
,5,-6,7,-6,0,7,2,4,-8,6,-3,-5,-5,-3,6,-8,0,1,6,-7,-4,5,5,2,6,-5,2,-4,9,-2,-4,7,3,-7,9,9,-6,-2,8,1,7,-7,3,6,2,2,-5,-6,2,6,-4,3,-2,6,0,-6,-5,-6,6,-8,5,-1,-6,-3,7,2,4,-6,5,-6,7,2,-9,6,5,4,-1,5,2,7,-6,-6,1,7,-1,-2,6,7,7,6,-8,8,4,5,-9,4,5,-6,-2,6,6,-6,-8,-3,-3,7,-9,-1,1,-1,-4,0,9,1,2,7,4,-3,-1,4,-3,-7,2,3,5,9,6,-1,-1,1,-4,6,-5,8,-1,-3,-9,-5,-4],[7,9,-1,-8,2,-7,-8,2,5,7,-1,-7,-1,1,-5,7,1,4,-5,-7,0,-7,-3,-1,-3,-1,5,8,9,-8,-6,8,0,-4,0,5,4,6,-8,8,-2,7,1,-1,1,2,-7,-2,-9,-7,6,6,-2,-4,2,-1,8,-2,-3,-1,-3,7,0,7,-3,5,3,5,-5,1,1,-5,-7,-9,-9,5,9,2,-7,-4,-5,5,-4,-2,-2,-5,5,6,9,-1,3,-2,-2,9,-5,-6,-1,-2,4,4,-8,-9,7,-5,-5,-1,-4,2,4,4,5,-5,0,2,4,-5,4,0,-8,8,-2,-5,-4,-7,8,-3,-6,-9,-4,-7,6,7,9,2,5,6,-7,-5,-6,-3,9,8,1,7,-3,0,-5,8,-2,3,4,7,1,0,-6,-9,9,-7,5,9,-9,-1,-7,-8,3,-4,0,8,-1,2,2,-4,3,-9,0,8,9],[-3,1,8,5,-7,4,4,1,9,4,0,5,5,-3,-3,-2,9,5,-5,-2,-8,8,2,-8,-1,5,5,-4,-6,3,7,4,-2,-1,2,7,6,-9,4,3,2,6,-5,0,-3,1,-2,9,-6,-8,-3,3,-7,7,3,-7,-8,2,0,-5,-5,3,7,5,9,-5,2,-9,0,5,-1,-9,6,1,-3,-9,-3,8,5,8,-6,-1,2,-3,5,8,-5,2,-9,-1,2,-1,-8,7,-2,2,4,-1,-6,6,-4,3,4,0,5,-7,2,2,4,-4,1,-1,4,-8,6,-2,9,7,-8,-4,-1,7,4,-7,-6,7,1,5,3,9,-8,-9,-3,4,-9,5,8,-7,3,7,-3,-6,-7,-3,-9,-8,-5,0,-7,2,-3,-8,3,-5,9,4,-8,-3,-7,5,-7,-4,3,-6,9,8,9,9,-1,3,4,-9,5,8,9,-1,4],[-9,-3,-5,1,-7,6,5,-9,2,8,8,7,4,-5,-7,-5,1,6,0,7,8,-3,9,4,7,-6,-1,-5,-6,6,8,-9,6,-1,4,5,-9,-2,2,-1,-8,-9,6,3,4,-5,-5,7,-4,0,2,-8,4,-8,6,-4,-9,7,-6,9,-6,5,5,0,4,-9,5,7,5,2,4,-3,-7,-1,9,-7,3,-9,3,4,5,9,-2,4,2,7,0,-1,-6,1,-5,8,-4,-3,-9,5,2,1,-4,-4,-4,-7,-1,4,9,-3,-1,2,0,9,-4,-7,-2,2,8,6,3,9,4,-7,-2,0,-9,1,-8,-9,7,8,-8,-9,-2,3,7,-4,0,3,-4,-3,-1,-5,-2,-3,-3,8,-3,7,-2,-6,5,-3,2,8,-5,-7,7,1,4,4,0,-1,-5,-3,-3,8,5,-2,1,5,-3,0,6,-9,-3,-3,-3,-8,9],[-6,6,-9,-2,9,4,-8,-8,7,-8,8,-6,3,-1,-4,-5,8,-3,5,-6,1,8,6,-4,-7,-7,-5,-5,-8,0,2,7,-9,2,7,-2,2,2,2,-4,-2,-4,-1,4,-6,3,5,-8,7,9,0,0,-3,3,-8,-1,5,-5,-4,-8,-7,-5,-5,6,6,-2,-5,4,-2,-4,7,6,6,-9,2,2,-9,-8,7,2,8,-3,-1,9,7,-2,2,2,-3,-8,-7,-6,-2,9,-4,-2,0,-1,6,1,-5,-8,6,6,-4,6,-4,4,-3,5,0,8,9,7,-2,4,-6,-8,-6,8,0,-4,-6,-5,7,-7,1,2,-3,-6,-1,8,9,-3,-2,1,9,-9,-8,-1,-6,-2,-3,8,-2,-2,-9,3,-8,-5,-9,1,5,-8,9,-1,6,8,8,4,-7,2,-5,9,9,-1,7,-8,3,-1,-3,9,0,2,3,-2,7],[-2,-5,9,5,6,9,1,5,-6,9,6,-2,-1,-6,-8,4,-7,3,-1,-8,-4,3,6,-4,-6,6,9,5,8,6,-8,2,-1,-1,-4,8,-2,6,-6,-4,-7,-3,1,5,1,-9,1,-4,0,1,3,5,3,5,-1,-7,4,0,7,-7,5,-4,2,4,-4,-2,-5,1,-3,5,6,2,-4,3,5,5,-1,-7,-4,5,1,9,6,-3,7,-4,4,8,0,-5,0,3,-9,1,-3,-6,1,2,-1,2,-8,3,3,6,-5,-6,2,-9,-5,-4,-4,9,4,5,4,-9,0,6,-4,7,7,-6,9,-2,-9,9,-8,-7,9,5,6,5,5,3,-1,-1,3,5,3,-8,-3,8,-5,-1,4,7,0,-5,-6,3,-7,7,8,-9,7,-4,-9,7,-1,2,4,-3,-7,-5,0,1,-4,-3,0,3,-8,3,-2,-5,3,-1,5],[9,-1,-1,-1,-4,-3,-1,6,3,-5,-1,8,-5,-9,-6,8,-7,7,-9,1,-1,-7,1,-3,-9,-9,-1,6,3,8,-2,-5,4,2,-6,8,7,0,2,-3,8,-2,2,6,7,-8,1,8,8,-7,2,-6,-3,-3,6,3,-4,-7,-3,2,-8,9,-5,-2,-1,2,-8,-7,0,6,-2,1,-3,-1,4,-1,4,0,-2,2,5,-9,5,3,8,-5,1,6,-2,-9,8,-6,9,6,-8,4,-5,6,-7,4,-9,1,9,-6,6,-8,-1,-3,6,0,2,5,-2,1,7,-5,2,5,1,6,4,-6,6,4,-1,8,7,-1,6,5,2,4,-7,-3,1,-7,5,3,-1,7,5,-9,3,6,3,1,-5,2,7,8,-8,7,9,5,-5,-2,9,6,3,6,8,1,-4,6,2,-8,5,-2,9,1,3,3,-2,-9,-9,9,-2],[4,4,7,-2,-9,-5,9,7,-3,-3,-4,0,1,7,2,2,9,-8,-4,-6,-2,-6,2,-7,0,2,-1,-4,2,-3,0,-7,-9,-2,4,-7,-2,8,0,1,-4,3,-1,-1,9,-8,-6,5,2,-5,-9,9,0,-6,-1,-5,-9,-3,-2,2,8,-3,7,9,-8,7,4,5,-5,9,-8,-4,5,1,-8,-9,3,0,-8,-3,6,3,-2,1,-3,-4,-4,3,-9,-3,-5,-7,3,3,6,1,-4,-3,-2,0,4,6,-7,-3,5,-6,7,-9,4,-5,2,8,2,1,3,-8,-8,-9,-3,1,9,-5,-9,-5,1,-1,-2,6,1,-4,5,-8,1,-2,2,5,-9,8,6,2,7,1,-7,0,7,5,2,-7,-9,-5,8,5,9,-5,-8,7,-6,7,2,1,-3,4,-2,-8,7,3,-9,-8,8,9,4,1,-9,-2,5,7,-7],[9,-3,-6,-8,2,5,-2,-6,7,-1,5,6,6,-2,-7,4,-7,0,-9,2,-3,-2,1,3,4,-4,-6,0,5,5,0,1,-3,2,2,-9,5,-1,-1,8,4,4,-9,-7,-1,2,-5,1,1,-7,-2,-7,-2,1
,1,-8,-5,8,5,9,4,-5,6,2,4,1,-4,1,-6,1,-8,-5,-3,5,5,9,5,-3,0,3,5,2,9,-2,5,2,0,0,8,2,4,-1,3,7,-8,9,-1,2,-2,3,0,9,-1,9,-1,2,-6,6,-6,-6,-3,-2,0,2,-4,5,-9,6,7,-8,6,1,7,0,2,7,-2,9,-5,-9,-6,-7,9,-4,4,-5,-5,-8,2,7,0,-4,-2,6,8,8,-9,9,-4,-7,-9,3,1,-7,3,-7,1,-1,-1,5,9,-3,-2,5,6,-2,3,4,-8,-7,3,-1,7,-4,-4,6,1],[9,3,9,2,1,7,5,3,7,3,9,-9,0,8,2,0,3,9,4,-8,-9,-9,-7,-2,7,7,-6,6,-4,8,8,4,-9,-6,5,8,-1,3,0,2,2,7,-1,5,0,-3,-1,-2,9,4,4,8,9,8,7,9,9,6,0,-7,-5,-8,-8,9,2,8,7,-3,5,9,6,0,-8,-8,-3,-8,-4,7,2,-7,-8,2,8,-8,-2,-9,-4,-3,-7,-8,2,2,5,4,-4,5,0,8,-3,-5,3,-6,0,-6,0,8,-2,5,0,-3,0,-1,-2,-4,4,7,2,-6,-5,1,3,-5,9,4,-3,7,4,3,0,-3,5,4,-1,6,0,-6,-2,-6,-3,9,4,-2,-4,3,-5,2,-9,-4,7,7,-5,4,9,-4,8,-9,7,8,3,5,3,1,-2,-8,7,-7,1,0,9,8,-5,-4,6,9,1,-4,6],[6,-3,-7,1,9,-9,-4,-7,4,-8,9,-4,-2,-6,1,-2,-8,-5,-1,3,-4,-2,-3,-3,8,7,-7,3,2,-5,-8,8,-3,1,3,-4,9,7,-1,-2,-9,7,1,7,-7,-4,5,3,6,-9,-2,-3,-5,9,8,-8,2,-1,-4,0,2,-5,-6,4,7,9,1,-3,-8,-8,-1,9,-2,-5,5,7,2,-9,2,7,-5,-1,-8,0,4,-3,7,-8,2,2,2,1,-3,6,-9,1,5,7,-7,-9,-2,3,-7,-4,6,2,-4,4,-8,8,-6,-2,3,2,4,-3,-1,2,-6,6,-1,-7,9,-7,-9,0,7,3,-6,6,-6,-9,5,-6,-3,-6,-6,4,3,0,-5,-8,1,4,6,1,8,0,-9,-7,1,4,-6,-9,4,-1,3,6,5,-7,9,-2,-7,5,-6,2,-6,-7,-4,8,-4,-5,-2,5,4,-1,-9],[-2,-2,-2,-3,5,-8,-9,-2,9,0,-5,-5,-5,2,2,5,-4,4,2,-2,6,7,4,-2,-4,-1,8,1,2,3,1,-1,9,1,2,-2,9,9,-8,-9,-8,-2,-9,9,9,-2,4,-1,8,7,7,-2,8,-6,-1,8,0,9,-3,-6,3,2,1,9,-6,6,-1,8,-5,-8,3,-9,-8,3,-9,-7,7,-8,7,-4,-7,3,-4,-3,4,-8,-5,-6,-7,4,-7,-8,2,-1,-5,-5,7,4,9,-4,0,-3,5,2,-6,5,-2,2,-4,-5,-9,0,5,-6,-5,-6,-9,1,3,-2,3,-6,7,4,8,-8,5,-3,-8,-1,5,-7,0,-3,-3,5,-2,-7,4,1,-7,-1,-9,8,3,9,3,-7,5,-8,-5,2,-8,-7,-8,-8,3,8,-7,9,-6,-1,0,-1,6,-2,-4,-9,6,-2,9,-5,0,0,9,-5,-8],[-2,-1,6,4,3,-5,-8,-6,-2,5,-1,4,-3,-4,-4,3,5,-9,7,-3,-1,-1,-1,-3,-5,3,-9,-1,-5,5,8,8,-9,-7,-7,-3,4,6,-6,-2,0,1,6,9,-9,-2,-1,3,-1,2,2,0,-8,-7,9,9,-1,-9,6,7,1,5,-2,5,6,4,1,-2,-7,-9,9,-7,-1,-5,-3,-5,-3,8,-4,3,-8,-8,0,-5,-8,7,4,1,-2,-5,3,6,3,-1,-2,-9,5,-7,0,-8,-5,3,4,7,8,-3,-7,-3,1,3,-2,-1,4,-5,5,-7,-8,2,-6,1,5,-6,9,5,3,-6,0,-7,6,9,4,0,-7,-4,9,0,-4,-1,6,5,7,-5,2,5,-3,3,-1,1,-5,0,-4,-5,-2,2,6,-6,2,-3,-4,6,-6,4,-5,7,1,3,9,1,-3,-9,-5,4,2,4,-3,-2,0],[4,1,3,-3,-3,6,7,-4,0,2,2,-2,7,-6,3,-3,-7,-1,-2,-3,3,5,0,-3,6,0,-5,-1,-7,7,-1,4,-9,1,0,-2,7,-7,9,-2,7,2,8,-4,2,9,3,0,-7,-1,-5,5,-8,-3,-8,-7,6,-2,-1,-6,7,-4,1,-6,-5,3,-5,-9,6,3,-4,8,3,3,0,5,-1,8,3,-5,-5,-6,6,8,-8,3,-4,1,1,-7,-7,-9,7,-8,8,-5,4,2,8,-3,2,-1,-5,4,8,9,-7,-3,2,1,3,2,-5,4,-4,2,-2,3,9,9,-2,3,-4,2,-1,1,1,-4,-7,-6,-6,0,1,2,-9,-4,-7,6,-5,6,-4,8,-8,-7,-5,-7,0,-5,-1,-7,-6,8,2,-7,7,-3,3,-6,-2,-3,0,0,-6,-7,4,-8,-1,5,9,6,-8,-1,-1,0,1,-7,-2],[9,4,7,-8,-3,0,-4,9,-6,-1,-2,-4,4,9,8,2,-3,-9,-2,1,-8,9,1,-5,-8,2,-9,-4,0,-2,-9,6,0,0,2,4,-3,9,1,-7,0,2,9,1,0,6,-1,2,-7,7,5,-5,5,2,-1,-6,7,-5,-8,-7,9,-6,-5,-4,-8,-3,-8,-4,-4,9,-2,2,-6,-4,8,-1,0,3,-6,5,9,-4,-3,8,-5,-5,3,4,8,-5,5,-1,0,-6,-4,-4,2,-9,0,-8,-5,-8,6,9,-8,6,-2,9,7,8,-4,-9,0,8,2,3,0,-7,-6,7,2,2,-3,1,-6,9,9,0,-4,-2,6,1,-7,-8,-6,-9,2,-3,-3,6,-2,3,-5,0,0,-6,7,-8,-9,8,9,-2,-6,4,6,-9,-1,-5,-6,1,6,4,-8,-9,-5,-5,8,-1,4,-6,8,3,-6,1,1,1,7],[-6,-6,3,1,-9,8,5,-7,-5,7,7,0,-8,-9,-9,-2,-5,8,-3,2,7,-1,-4,-4,-5,-6,-6,-5,1,5,-4,4,1,1,-4,4,-8,-8,-3,-3,-8,-5,5,1,4,-3,-3,7,1,9,2,-7,3,6,-6,2,-6,-4,1,6,1,3,-6,7,-9,-9,8,-7,6,6,-9,4,8,4,-5,1,-5,4,9,-7,-9,-5,-3,6,-3,-3,3,-1,7,4,-8,-4,1,-7,-5,2,8,3,-1,5,-7,7,-7,-9,6,9,-3,5,6,6,2,-4,-8,3,6,2,0,-2,8,-8,-9,4,-4,9,3,8,-2,6,-3,4,9,-1,3,2,6,-2,-3,-1,-4,3,0,6,7,8,-9,6,3,9,4,5,-3,-2,5,-4,8,-6,3,-9,5,6,6,-3,-8,-2,7,7,9,-1,2,0,9,-6,-9,2,-1,-3,5],[-1,-3,6,9,-7,6,0,6,-2,-4,-8,-7,2,-3,-2,-7,-6,-4,9,-1,4,2,-8,1,4,-6,-5,2,6,0,6,-5,7,5,-3,4,-4,-8,6,-5,6,-2,5,1,-1,-5,2,-4,2,-1,-4,-7,-3,0,8,8,-9,-2,-7,4,6,0,-9,-9
,-5,-6,-3,8,6,-7,6,8,6,-4,5,-9,-6,-9,5,4,-3,-1,-8,1,3,3,0,9,3,-1,5,-2,0,-1,9,0,-3,-6,-8,9,-6,2,-5,-7,1,4,-3,-7,-4,-8,9,8,3,-7,-2,2,0,1,-3,8,0,-5,-1,-3,7,4,-3,2,2,-3,-1,8,2,-2,-6,2,-8,4,-8,-2,-6,-2,-2,4,-2,-6,-9,1,-2,1,-9,5,-1,9,-3,5,1,3,9,2,-6,8,-1,2,-8,3,-8,4,8,-2,-8,1,-4,-9,9,4,1],[-6,-2,-5,-8,-4,-7,-3,-5,9,3,9,7,4,-9,-2,-8,7,0,1,-8,-6,6,-4,5,-5,4,-9,-6,0,-7,0,4,0,-1,7,-2,-8,0,8,8,-5,3,-6,2,6,8,6,-6,-5,-2,-3,0,-6,7,-3,-8,8,7,4,9,8,4,-1,-1,7,5,9,-9,1,7,-2,7,9,-2,-3,6,-7,-4,-1,-6,9,5,3,8,4,9,-2,3,-1,-1,0,-3,-9,-6,-9,0,7,0,9,5,0,6,-4,-1,7,1,7,-7,-6,-5,4,-4,2,-3,-3,1,-8,1,-2,2,-2,-8,-8,-4,5,8,0,-5,-5,8,-7,-6,-6,6,1,5,-3,9,8,6,7,4,7,6,7,1,-5,-8,8,9,-8,3,1,-3,-8,-5,0,4,-5,0,-8,-5,-4,-5,1,-1,3,9,-1,-2,-8,-6,-5,-7,-9,1,2],[-5,2,4,-8,4,6,3,-6,-3,0,-7,5,9,3,3,2,-3,9,-9,-8,-3,-6,6,-5,3,-8,-7,2,6,6,-5,-9,8,2,-3,-8,6,-3,-4,7,5,-7,-5,9,-3,1,2,8,3,-2,0,5,-9,8,6,1,-5,9,-1,-2,3,-4,-9,-8,-8,-9,6,-3,-5,-1,8,8,-4,4,-2,-8,-3,1,7,3,8,6,-4,5,0,4,-9,3,2,7,9,8,9,4,-5,1,-1,-6,4,2,8,-1,-9,6,7,-5,2,-9,8,-8,1,-8,-3,-4,-7,-9,-3,5,2,-8,3,-4,9,-1,-5,-5,9,-6,7,9,-5,-6,-6,-3,0,0,2,-3,1,5,1,-1,8,8,9,4,2,6,4,6,-4,6,9,7,-8,4,-3,-8,-9,9,-1,9,0,-1,2,-7,-5,6,7,9,-8,-1,7,-5,3,-7,-7],[3,4,0,8,-7,-5,7,5,3,-8,2,-6,-1,4,-8,0,-5,-3,-5,5,5,-8,-3,-2,-2,-5,8,3,2,3,-9,-2,-1,1,6,4,2,-1,-6,-3,5,5,-2,-4,-2,-8,7,-5,-3,6,7,6,-3,-1,2,2,8,0,-4,0,0,2,3,7,-2,-5,2,1,-6,-5,1,4,0,-7,3,0,1,-7,-4,-6,0,1,8,-7,9,6,-2,0,-5,6,3,-2,2,5,0,2,-1,8,1,-3,-3,4,3,-1,-6,-8,7,7,-5,4,2,-3,9,8,6,-2,8,2,-6,-2,-3,2,8,-1,-8,-7,3,8,9,-7,-2,-3,8,8,0,7,-3,1,2,-8,2,-3,-6,7,6,4,-2,-3,0,0,6,2,1,-9,-3,-1,-8,9,0,-8,-7,0,3,-3,-1,3,5,-9,8,1,8,7,-6,2,3,8,2],[9,2,-9,1,7,-4,7,-9,8,9,-7,-7,8,2,-6,-7,2,-8,-6,-1,-6,7,5,-3,1,7,-3,-1,-6,6,7,3,-7,2,6,-9,3,5,1,-6,-2,-4,-4,0,3,-6,1,-4,-9,3,4,-2,7,-6,8,-9,9,-4,-9,-6,-1,9,6,-7,-7,-3,6,-4,-2,-6,-8,7,8,-6,9,-5,9,-8,-4,3,-8,-5,7,-9,7,1,-5,-6,-7,-2,-8,-1,6,0,4,7,7,-5,-5,4,0,-4,-7,-8,2,-1,-1,9,1,-2,9,-8,3,-6,0,5,-5,6,-3,1,9,7,2,5,7,4,3,2,1,1,-9,-9,-3,8,5,-2,3,0,0,3,-3,-3,1,2,-8,6,8,1,-5,0,7,-5,1,-3,7,-9,-4,3,-5,1,-6,-7,6,9,9,5,-6,4,-2,3,-1,4,0,-7,-2,2,9],[-5,2,-3,-9,-8,9,-8,-6,-2,-1,4,6,9,0,-3,-4,-1,-6,1,-3,-4,-8,-7,-6,-1,3,-6,9,-3,1,5,-8,-6,1,-8,2,-4,5,-7,-8,-2,-8,-3,2,3,-5,-2,8,-6,8,-7,-1,7,5,-1,-4,9,6,-5,5,-2,0,3,1,3,-6,0,8,3,-8,-6,-2,6,2,4,1,-4,9,-6,9,-5,-2,1,-3,7,0,9,-3,-8,1,1,0,-4,8,2,2,5,-1,5,-2,-3,8,-7,5,-9,1,7,-1,3,7,-5,-1,-2,-3,6,-9,2,5,7,-6,2,9,5,5,-3,-5,-1,6,-6,-4,-3,-4,6,-8,-7,-4,-8,-1,8,4,3,-4,-9,0,8,7,-4,-1,4,4,6,8,2,9,-6,-8,-9,-7,-4,0,-7,-4,-6,1,-4,2,7,6,-6,-7,-6,-5,-9,4,-7,-5,-8],[9,-2,-2,-1,6,1,-9,-4,-4,3,-1,-3,7,-9,4,6,1,-1,-7,-1,5,8,6,-5,8,8,2,5,4,4,-3,-5,9,2,-6,5,-6,-3,6,7,2,-1,-6,5,-8,-2,-4,-4,-8,0,-6,-6,-8,2,3,-3,6,-4,-6,0,7,0,5,2,3,-5,-2,6,4,7,8,2,-7,2,-5,-5,8,6,2,2,3,-9,6,0,-7,-8,7,-5,5,0,3,1,2,-9,2,1,0,8,2,4,-6,7,-7,0,4,5,6,-6,-5,-6,-8,8,-1,5,-2,9,-4,1,0,-8,2,9,0,2,7,-9,-8,0,6,4,-7,6,9,-9,0,-7,5,7,-4,-1,-4,7,3,1,-2,-8,-6,0,0,6,-8,5,2,7,3,8,-5,9,1,8,4,3,5,8,2,-1,-7,9,7,7,9,3,7,-3,-2,-7,8],[-8,-5,-5,-7,3,1,-2,-2,-1,-5,1,7,9,-1,0,8,7,-7,-4,-9,0,-3,-3,9,-5,-2,-1,-3,7,7,7,-6,-8,1,4,-8,9,-6,7,4,-5,-5,2,5,-3,4,5,-7,-1,-2,4,6,1,-5,8,-8,-4,5,8,0,9,2,1,-5,1,-6,-6,1,5,-2,9,8,9,1,-2,-1,-6,-4,-6,-1,-9,-8,-1,-9,-3,-6,-3,5,9,4,-2,0,-6,-1,8,-4,-2,2,3,1,-2,-4,6,-7,9,-5,2,-3,-8,-9,7,1,6,-4,3,7,-1,9,7,1,-6,1,-9,3,-4,8,-2,8,-5,6,3,3,5,0,-2,8,9,-2,-6,4,7,2,1,-8,0,5,1,0,-4,4,0,-6,-4,9,-9,-2,-2,-8,-2,2,-5,-9,9,-9,7,-9,5,-3,9,7,5,-5,9,-9,-6,-7,0],[7,-3,5,-9,7,-7,-7,-9,-5,5,0,-2,-2,1,-7,0,4,-5,-9,-2,1,7,-4,1,5,-2,-7,-8,8,-8,6,-4,0,7,0,7,9,3,-2,1,7,-8,-4,0,1,-6,-4,3,7,-5,-4,2,-5,-2,-8,-8,1,-4,2,7,-3,7,-7,-6,-9,7,-3,-2,-7,8,1,-8,2,5,-1,8,6,3,-4,1,-
7,1,7,-8,-6,6,6,7,-2,3,-9,-7,1,5,5,9,-3,6,-8,-9,5,-5,0,6,-9,-6,2,-2,-9,-7,5,9,3,8,-8,5,-7,-9,-2,2,1,4,5,6,5,4,5,8,-9,8,-8,5,-5,5,9,-4,9,-8,-3,4,-6,0,-1,-2,-5,0,4,-8,0,-8,-8,3,-2,5,2,-9,-6,-7,-6,4,-1,-8,-3,4,-8,-8,-2,2,-1,8,-6,0,5,-7,6,-9,-7],[-2,-5,-2,-6,-5,-5,-4,0,8,6,-7,3,2,-2,4,-8,4,4,7,-9,-5,4,-1,8,7,2,-5,6,6,-5,5,9,0,-6,5,7,-2,-1,7,6,5,1,-7,-6,9,5,-9,2,5,4,4,5,8,-6,-5,3,-7,-5,4,3,7,5,-6,-8,2,-3,-8,9,-6,-8,1,5,-8,5,-5,5,2,1,8,4,2,4,-7,2,-5,-1,9,-3,-3,2,5,4,2,-5,1,-9,-1,-2,8,3,4,7,2,7,-3,8,8,-8,-2,7,-5,3,5,3,-1,-5,7,0,-7,-3,5,-9,-9,-7,-4,0,2,5,8,-1,-3,-8,7,2,-3,1,7,9,3,-5,-4,-6,3,0,-4,2,9,7,-3,-5,3,6,9,2,5,-4,7,1,-2,6,4,-7,-6,-6,0,-5,9,-5,8,6,8,-7,3,3,2,1,-1],[-8,3,-2,4,6,-2,-1,9,-4,6,-8,4,8,-4,-6,-4,5,-2,3,-6,-9,-4,-2,8,-7,3,2,-9,2,-8,-5,-5,9,7,-9,-8,6,2,-6,-1,-3,-3,5,6,-1,8,-5,8,-5,-2,-8,6,0,-2,2,8,9,-5,-4,4,-1,2,-9,7,-3,-4,8,0,-4,-1,0,6,7,7,-4,-7,7,3,-7,6,-8,5,5,7,0,3,0,0,-3,-1,-8,0,-7,2,6,8,-3,8,9,1,-6,-7,-5,4,3,-2,-2,0,-7,7,-2,3,3,-3,-3,3,2,-3,-2,6,8,9,9,-5,3,-7,-6,8,-1,-7,-8,-8,-7,8,9,-8,4,-8,-9,8,9,4,-8,-1,-7,9,-1,-3,0,-3,-9,5,7,0,-3,-5,-6,-3,5,3,8,1,4,-1,6,5,9,-9,-1,-4,-7,-1,3,-2,5,-2,-6],[-6,1,6,9,-9,-3,-9,0,5,6,-8,1,3,-3,4,4,5,4,9,1,-1,9,-5,-2,-1,-4,2,-6,-3,-6,-6,-4,8,-4,5,-1,3,1,9,4,3,3,-7,-2,-9,-7,-2,-2,-9,-6,-9,4,-3,1,6,-7,-8,-9,-4,7,-3,-2,1,5,-5,6,6,-3,3,-4,7,1,-5,-3,-4,-8,-5,-9,-1,-3,-1,-8,-3,-5,-8,-8,7,-5,1,8,2,4,1,5,-2,3,9,7,0,-5,9,6,1,-4,-9,-4,-6,1,-3,6,5,-3,9,-2,4,9,7,-2,-3,3,2,-4,9,-4,3,-9,2,-1,5,2,-9,1,6,3,-2,-3,8,6,-2,2,6,-9,9,-2,-9,-6,-2,-2,6,8,4,-5,-9,6,-4,-5,9,6,-1,7,-4,8,-7,2,4,4,8,-6,-6,5,-4,5,-7,-2,8,5,-3],[8,5,0,3,-6,-3,-5,6,-7,-5,0,5,-4,8,6,-1,-2,-7,-8,8,4,-8,9,5,-1,0,6,-6,-7,0,0,-8,-5,-3,3,-3,6,7,3,-6,3,7,-3,-7,0,-3,-7,6,4,-9,-1,2,4,-3,-6,8,6,-9,-1,5,-5,1,7,-2,-7,-5,8,-8,3,-8,-1,7,-5,1,-8,-3,0,6,0,-5,6,3,-5,7,-8,4,0,7,-4,2,7,-1,-3,8,3,6,5,5,4,-4,5,6,-6,3,-5,4,0,6,6,8,-9,-2,-8,2,-5,-5,4,-9,-1,-7,0,2,2,8,-3,-5,2,-5,-7,-3,6,-3,0,-7,4,9,6,0,9,9,-2,-2,-8,-4,-2,-2,9,-6,7,-3,-6,8,-8,9,0,-4,1,5,7,2,-9,2,-5,7,-9,4,0,2,-6,3,-6,-9,0,0,-6,1,-6],[6,-1,-8,9,-7,-1,2,-3,-2,9,-9,-1,5,-8,0,3,0,-6,4,8,-3,0,-1,0,2,4,-8,9,3,5,2,-1,7,-2,2,-2,-9,-7,-7,-6,-9,3,7,2,8,-5,-5,5,-2,1,8,-6,2,-8,7,-9,9,4,0,-6,6,1,-5,7,-8,8,7,1,-1,3,0,4,1,-4,-2,-6,7,1,-9,-6,3,-2,6,-3,1,-7,0,8,1,-6,-6,8,4,1,1,-5,-5,-1,-8,5,-4,-6,9,-2,1,6,6,4,4,5,-3,-7,3,5,5,9,4,-8,-1,7,-2,-4,-9,6,-6,8,-2,-7,-8,-5,7,4,0,7,7,-6,-6,6,-9,9,0,-3,1,0,-7,4,3,6,-5,-5,8,5,4,8,2,6,3,-3,9,7,7,8,-9,-8,2,-9,8,9,-5,-1,7,-5,-1,2,5,-9,-5],[5,2,2,2,-1,8,-4,-2,-2,0,-2,5,-7,9,7,6,-8,-6,-6,-7,-1,4,8,-8,6,-8,5,-1,-3,-5,4,-4,1,-5,-1,8,-6,6,-9,2,9,-8,-3,3,-3,9,1,2,-3,7,2,2,-1,-9,-4,-9,-2,2,-2,4,4,-7,7,-4,-6,4,-6,-2,3,6,2,1,7,0,5,-3,-4,-9,5,4,-4,-8,-4,-5,-6,8,8,8,3,-6,-8,-9,-4,-7,4,-9,1,0,-4,6,-9,4,7,8,-1,-5,-5,2,8,5,-4,2,3,8,7,0,7,0,-9,-3,-8,-4,6,-8,-6,6,-1,-5,-9,-1,-3,8,-4,5,9,0,2,9,7,6,8,0,7,-2,2,6,-4,-1,-5,3,8,0,-2,3,-6,5,-7,7,-3,9,-1,2,7,-1,-6,-5,2,9,-5,-9,6,-3,-8,2,4,-2,-8],[8,-9,7,-9,3,-7,8,9,-1,6,1,3,-8,-7,-6,1,4,-1,-1,8,-1,1,-4,5,-4,3,-9,3,-9,-5,2,9,-2,4,-1,-8,-6,0,-7,-2,-8,0,-5,-1,7,-5,0,7,6,2,3,3,-5,-7,-4,-3,-9,-4,8,-7,6,5,-9,-4,2,9,-1,6,1,3,-2,-6,9,-6,8,-2,-6,-2,-9,-6,7,9,-9,8,8,7,-7,-6,8,4,-3,3,9,3,4,8,-4,9,2,-4,9,6,-8,-5,2,1,8,-9,3,-4,8,-2,-5,5,-5,5,2,-4,-1,8,-7,8,-6,2,3,-4,9,9,7,6,-5,-3,0,6,5,-1,2,-1,-7,-8,-7,8,-4,4,8,3,1,-7,-3,6,4,-4,6,-4,3,-1,-6,-2,-6,-3,5,3,4,-1,4,-2,-4,-5,-3,-3,-2,-8,6,1,4,5,-6],[-4,-3,-3,9,6,0,-6,-3,-4,-6,8,-2,-2,3,2,3,3,-3,-5,-3,-4,-4,4,0,-7,5,5,6,6,6,1,6,-4,-7,2,-8,-3,-6,-2,6,8,-5,1,5,6,-6,5,6,8,-6,-9,-4,1,-7,5,6,4,-5,1,2,-6,-2,-6,0,3,4,9,6,-4,8,4,-4,-2,-1,-3,4,-3,2,-9,8,5,2,5,-2,-1,3,-4,6,1,2,4,0,1,-6,-1,5,
-4,-9,-4,5,7,-7,-5,6,-8,-5,8,-7,3,-1,5,-1,4,-4,-5,-5,-7,0,-2,2,-6,1,-4,1,6,0,-2,-3,0,-6,1,5,7,-7,-9,4,-3,-8,0,-9,-9,-2,-4,-3,1,3,3,5,-8,3,8,8,-2,-9,-9,3,-7,0,1,-3,9,0,-1,-1,-3,6,0,1,6,6,-2,-3,-3,-5,1,6,-2],[6,-1,3,-2,5,-9,-9,-1,1,-8,8,-4,0,7,2,-8,-1,-3,-9,-8,6,-7,-7,-4,2,4,5,6,9,-2,7,9,-9,-5,-3,-3,-3,-4,-9,8,-4,3,0,-6,-2,2,2,5,2,6,-1,8,2,0,-4,-3,7,6,-3,3,2,-6,4,0,4,6,-9,4,7,-5,-6,-4,-2,-7,9,1,-3,-6,5,-4,-9,-2,5,4,9,-2,3,-8,-9,-7,-7,5,-6,-9,3,7,-5,7,5,3,1,-4,9,-9,5,-5,0,2,4,-1,7,-3,2,-9,-2,-1,-9,9,9,8,-1,4,3,-1,5,8,8,-3,8,-6,2,-8,9,-8,3,-2,6,-3,-1,8,-3,8,5,-9,-4,8,4,-9,-4,1,9,-9,-6,-5,-1,-5,-7,-3,-1,-8,-2,7,-5,3,-1,5,-9,-7,5,0,-2,1,-2,-1,-2,-7,6],[-3,-6,0,0,-4,6,-3,4,8,7,-9,-1,2,4,-4,-6,9,-9,-1,-7,3,-8,1,-3,-5,-5,-4,-2,-8,-9,5,8,-5,-1,9,-4,-6,-8,-3,1,4,8,-3,0,5,2,6,4,2,-6,0,1,-6,7,0,1,-2,7,-6,-3,-7,-2,6,-1,0,9,-9,-8,-6,-4,0,1,-4,0,0,6,2,0,8,-3,5,-6,-6,1,-6,-3,-9,0,-3,-6,-3,6,0,-1,8,-9,-5,-6,-8,-4,5,1,2,1,6,1,-8,9,-6,0,-1,9,6,8,-9,2,4,-8,8,4,-7,-5,7,2,2,9,-1,6,-5,3,-2,-6,9,0,5,9,-2,8,-3,-7,9,-3,-5,9,-5,1,3,1,-4,8,7,9,4,-4,0,-2,6,-6,3,-6,-7,-1,-5,-6,4,4,6,-3,4,-3,-3,-6,-3,2,-6,-8,-3],[4,-7,6,4,9,3,4,-9,7,-2,2,-8,4,9,5,3,3,2,-8,-6,7,9,-8,-5,-9,-5,-9,8,8,-7,-9,-6,-9,0,1,-8,-1,-2,7,-3,-6,4,1,-8,0,3,5,8,1,3,-2,5,-7,-7,9,-9,-9,-9,-9,6,9,8,2,-1,7,-5,0,2,-7,0,-6,-7,-2,7,-8,3,5,-8,2,-5,8,-6,7,-7,8,-4,-2,0,-6,-6,4,8,4,6,-5,6,-9,-8,-5,9,6,8,0,5,0,-4,-6,-7,7,1,1,-3,-1,-8,1,2,9,-7,3,-5,-1,5,1,-2,-2,8,7,-5,-2,3,0,-1,-6,0,3,0,2,-8,-1,9,-7,4,1,-5,-6,6,0,7,-3,8,8,7,-4,-6,6,2,-9,-6,-6,4,0,-3,0,1,-1,3,-8,6,8,4,8,4,6,5,2,-1,2],[-4,0,-1,-9,9,0,-9,-9,5,-1,6,8,7,-2,5,6,9,-3,-9,6,-3,-9,-8,-7,2,9,-3,-2,4,-5,-9,7,-7,4,9,-9,-6,-9,0,-4,-9,-9,-3,-5,8,6,9,-3,-7,6,7,-6,1,5,-7,4,0,-1,-9,4,-5,-8,1,6,2,6,1,-4,4,-9,-3,-4,4,-9,7,8,7,9,7,5,-9,-5,-8,-7,4,6,-7,-8,4,6,2,8,-6,9,-1,-2,-2,-4,-8,8,3,9,-6,-9,2,-6,6,-4,9,-6,-4,-2,1,4,1,9,1,-6,1,-7,0,4,-7,-6,-7,8,9,3,-7,-3,-3,7,-5,-8,-5,9,-1,3,-1,6,-3,-7,9,5,4,9,-2,-5,7,7,-8,-3,-9,4,9,-9,4,9,2,-4,-3,-9,8,-9,9,-2,3,-2,-9,-1,-3,0,-2,-9,-9,5,-8],[-9,2,-4,1,0,-7,7,-9,2,4,-3,5,6,5,0,-3,-4,7,-9,7,9,6,-5,2,1,6,6,8,7,3,-5,-8,8,-7,-9,9,6,3,-5,9,2,2,-2,7,4,-3,6,2,6,-3,0,3,-1,6,4,-2,1,5,6,6,6,3,-5,-6,5,-6,-7,-3,-3,-1,4,-7,0,-9,5,9,1,8,-7,3,3,-6,2,4,8,3,-1,2,-9,-3,3,1,0,4,-4,3,-7,3,-6,3,-1,3,3,4,-2,2,3,8,6,-9,1,-8,7,6,-9,-5,-4,-5,-4,-8,1,-1,-9,-7,8,-9,-3,9,-6,7,-2,7,-2,-6,3,-5,3,-6,-3,9,-5,8,1,5,8,-8,6,4,-7,-4,5,-6,4,0,-6,8,-4,7,-1,6,-6,8,3,9,8,4,-2,-3,4,4,-9,4,-1,0,-4,-9,-7],[5,-3,-9,-4,7,-2,-8,0,-6,-3,2,-7,6,5,8,-6,2,6,-8,-3,5,0,-8,6,8,-1,3,4,-8,8,2,-3,1,-3,-9,-3,7,3,2,-9,-9,-9,-8,-3,-5,7,1,-4,0,-9,-8,7,-2,3,8,-2,5,-5,-4,-6,2,-4,7,-4,0,7,8,3,-7,1,5,9,-9,0,-6,-9,7,-1,3,-1,-2,7,3,-9,0,-2,8,1,-9,7,6,-4,4,-4,-1,-8,-8,-6,8,5,-9,3,-8,-1,9,4,4,-6,3,8,-1,-4,-2,7,-4,3,1,-2,6,3,-9,-7,2,1,4,-5,6,3,4,0,4,-3,1,-6,-6,-1,-6,-5,9,9,-2,-1,-7,-7,8,3,5,2,-4,2,3,2,-9,-8,3,5,8,7,-4,0,-6,-8,0,4,0,3,4,6,1,0,-2,-9,3,-8,-1,-9,-4],[1,-4,7,-7,-7,-3,9,9,0,1,0,-3,-1,-2,9,7,6,-4,4,-6,9,3,3,1,3,6,-5,2,-9,8,-6,1,-3,-1,-7,-9,7,-7,-8,6,-8,-1,9,3,4,4,7,-7,-5,9,7,-1,-1,-7,-5,-8,-8,4,2,9,-3,-4,-3,2,0,-3,-3,-9,6,3,-2,7,-1,-7,2,-1,-4,-7,-7,-3,4,-3,-7,-7,-2,9,8,9,3,6,1,1,8,1,7,2,9,9,-4,-8,0,3,-1,0,-7,8,-5,5,2,6,-1,-2,-8,0,-9,5,9,1,-3,2,1,-7,-6,4,-1,7,0,2,7,7,-7,4,6,1,2,-1,-1,-5,-5,1,-7,-4,-3,-4,-6,-4,-8,-7,-4,8,-9,7,1,4,2,5,5,4,-8,-4,-3,-3,8,-9,5,9,-9,1,-3,9,-6,-9,9,6,1,-2,3],[5,-7,2,1,-6,-8,0,-8,0,-5,-3,-6,3,-2,1,-9,2,-5,0,7,2,0,5,5,0,8,9,-8,0,-1,-3,-3,-9,9,9,4,4,-1,-7,6,6,6,9,-2,-9,0,-5,8,-7,8,7,2,2,-5,3,2,9,4,-7,6,0,9,5,3,-4,-6,0,-3,1,4,-5,-9,-6,3,2,-6,-6,-6,-5,2,-5,-6,1,0,6,-9,-9,7,0,7,4,-6,-4,-5,-8,-5,-7,-7,-4,4,-2,7,2,0,9,-4,2,-6,-
2,0,-2,7,-8,5,-5,1,-7,9,2,-6,-1,2,4,-4,7,-1,0,5,-3,-1,-5,-8,-3,-9,8,-3,8,6,5,-1,-7,-9,-5,3,-3,1,-8,7,-6,-1,-1,9,-4,-1,-3,6,-1,5,3,-5,9,0,-4,-6,-4,1,7,-8,8,-8,-9,-9,-6,-3,-6,-6,-8],[5,-5,3,3,9,6,-1,0,2,-7,-1,-1,-8,6,-5,9,-5,-6,-9,5,-5,-1,6,3,-7,-7,-4,-4,7,1,6,5,5,0,8,-6,-9,9,-7,-9,-2,0,-5,3,2,7,-6,4,-5,9,-3,-2,-1,4,-4,1,9,-3,-2,6,-8,1,4,-8,-9,7,-8,-7,1,6,3,6,-2,5,-4,4,5,6,-2,1,8,7,3,3,8,0,8,-9,5,-7,-1,-9,-5,-2,6,-9,-1,4,6,6,-2,-9,-1,5,-7,0,4,3,9,4,2,3,5,3,-8,0,5,-2,4,-2,-5,3,6,8,-8,1,6,2,5,-8,-3,4,-2,-1,1,9,8,2,9,3,3,6,9,-9,2,8,-5,7,2,7,1,1,-2,-7,4,-9,0,-8,-5,0,-3,1,8,-3,5,-9,-8,8,-2,5,6,-2,5,-9,-6,-4,2],[6,2,-1,1,-5,-4,-5,-1,0,-5,-1,7,5,-8,-5,5,8,-2,1,-7,-8,-9,9,-8,2,-1,6,5,7,8,-3,6,-6,-9,-4,-4,9,-3,-3,7,1,5,-5,-3,-4,3,6,7,4,0,8,-4,4,9,1,8,4,-8,2,-1,-8,-9,-1,-8,-5,-4,6,4,5,-9,-1,-3,1,-4,-2,1,-9,6,-7,2,0,-4,-5,-2,0,4,-8,5,6,8,4,-4,8,3,-3,1,5,2,9,4,-5,-6,-5,0,-7,3,4,-7,9,9,-4,1,0,8,-2,3,-2,-7,6,4,3,-2,-7,-6,3,1,7,-6,-6,8,-2,-7,2,9,6,2,-2,-3,-9,4,-7,-6,5,2,2,-3,-7,-4,4,-7,7,-6,9,5,6,5,-8,-5,-1,-7,-8,-4,5,9,4,-5,7,5,-7,-6,5,-6,-6,8,-4,-8,-3],[1,8,-4,-4,-3,-8,-2,-6,2,7,9,0,-5,-3,1,-1,-2,5,2,8,1,-6,-6,-3,9,7,8,-6,5,-1,3,7,0,-9,7,4,-1,0,-7,-7,4,9,0,-6,-9,1,-6,-1,1,4,-8,-5,8,-3,-1,0,-6,6,-9,5,8,-2,-7,0,-4,-1,-3,3,-4,-9,2,0,-8,-1,9,8,8,2,-6,-5,-1,7,8,8,9,-8,-6,5,-5,2,-3,-7,-3,1,-3,-4,-2,9,-6,2,-2,5,-2,-6,6,6,9,2,0,-2,6,7,6,3,-9,8,-3,4,5,-4,-6,9,2,-2,7,-2,5,1,0,1,-2,0,7,1,7,-6,8,0,-4,-3,7,2,-3,0,1,0,-1,8,-8,4,2,-3,-1,1,5,-9,0,3,0,2,7,0,-6,-6,7,5,3,8,-9,1,-7,-1,-6,-7,6,7,-4],[-3,6,0,9,-1,-4,-1,-5,-2,1,8,-6,-4,6,0,2,-6,7,1,8,5,7,-6,-4,4,7,-1,7,1,4,-4,-7,5,1,5,-9,0,-8,-3,8,-8,-9,-9,-1,9,7,9,-8,1,-9,1,1,1,8,1,7,-7,-5,-3,5,-8,0,4,-1,-4,-8,4,-8,-7,-3,9,2,-7,-1,5,3,5,-5,6,-5,-8,1,1,-5,-8,-9,-9,3,1,-5,2,-2,-5,-2,7,8,-5,3,-6,-5,5,5,-5,9,-8,5,-8,-9,7,1,-5,5,6,-7,1,7,9,4,-3,2,-5,0,2,1,9,-1,-7,-5,5,4,-8,-5,4,0,7,3,2,4,-4,4,5,8,3,8,-6,1,1,-8,1,6,-3,9,-4,9,3,-6,-9,-6,4,3,6,0,-9,-3,-9,-1,8,8,8,-1,8,3,-7,-2,2,-7,7],[-4,-9,8,-7,1,-8,2,-6,4,-3,3,-7,-6,-1,-2,-1,4,-2,-5,7,7,9,6,-3,5,-9,4,-8,2,6,7,-7,-1,8,-6,5,-1,8,-5,-3,-8,8,5,-8,4,-4,-9,-5,8,-4,5,-9,-9,-3,9,8,1,-9,-2,7,-2,3,-6,0,-5,7,-1,-7,-4,1,5,-3,5,7,-8,5,-5,2,-6,-8,1,3,2,6,7,0,7,-8,-9,-2,8,-8,4,6,3,0,1,2,-4,2,-3,7,0,-4,4,5,0,3,7,-7,-2,-2,-3,3,-2,-8,7,-2,-5,-4,-4,9,-4,5,2,-3,4,-9,-1,-5,-4,-6,9,-8,4,-1,4,4,-3,-8,-9,-8,-6,8,0,2,8,-8,-9,1,0,-2,0,-8,-5,-7,5,-9,3,-8,-5,8,-7,-6,1,8,-8,-1,0,0,6,9,-6,7,3,-3,1],[1,5,-8,3,2,0,-1,2,-5,-8,6,1,4,-8,9,4,3,-4,-5,3,4,-3,-7,8,7,-6,6,-6,3,1,-3,9,8,9,-1,-8,-7,9,-7,-6,5,6,2,8,1,0,-6,-2,-8,-1,-5,7,9,0,-9,8,7,5,7,4,6,9,8,-4,3,-5,1,5,-9,0,-9,6,9,-3,6,-8,1,-6,-2,5,7,2,-5,5,-9,-6,-8,0,4,-7,4,6,-7,-3,3,8,7,5,-5,-3,5,5,3,-1,7,-1,-9,-7,-2,-2,8,5,3,-9,5,0,1,-7,-8,7,1,-3,-7,-1,-3,-8,3,0,-5,7,-9,8,6,7,-2,2,3,-5,-9,-2,5,0,-4,-8,8,-2,5,2,-8,5,-8,-5,-7,7,0,-7,2,0,8,-1,-5,2,3,-7,-2,5,-4,-9,-4,-2,7,2,4,3,8,-9,-4],[-1,6,-2,-7,8,7,-7,1,-8,6,7,9,9,1,8,3,-9,-8,5,9,-2,6,2,0,-6,-2,-5,5,-5,4,7,-6,-5,-6,4,4,-9,-6,-2,-8,4,2,-3,1,0,-5,5,-1,-6,-2,7,-7,-2,-6,-4,-3,8,7,5,9,5,-7,-9,0,-7,1,-2,0,9,-3,-2,1,-4,-2,8,6,6,-2,-2,-6,-8,0,-5,0,-4,-5,-7,-6,9,-3,9,-9,-7,-2,-9,2,-2,8,6,3,-3,0,0,1,-6,-5,9,7,-3,-7,5,4,-5,-6,-9,0,-5,-4,0,5,2,-7,-4,2,2,4,6,-3,2,-1,7,8,-7,-4,5,0,3,0,4,-9,8,5,3,0,-6,6,-4,3,-2,0,-3,-5,9,1,-5,0,-7,-5,7,1,-5,3,5,-9,-1,7,3,-6,7,-3,0,-8,-6,5,6,9,8],[0,0,-7,-9,0,5,-4,4,-7,-1,-7,-1,9,-6,-7,3,-6,9,9,0,8,-9,-3,-5,-8,6,-1,-1,-2,0,-4,-4,2,-6,4,-9,-2,1,-2,4,5,-7,2,3,-3,-4,8,-4,-4,-1,-8,-4,8,-9,2,-5,-7,-3,1,4,5,-4,7,1,0,2,8,-6,2,-9,-8,-8,-1,-4,-7,-9,-8,-7,4,-4,8,3,4,4,-9,-3,-1,-6,5,7,4,3,-5,5,-6,-8,-7,1,-5,2,0,6,8,5,5,8,7,0,-4,-6,0,3,-8,8,3,0,-8,-6,-4,2,0,1,0,2,1
,-4,-7,-7,1,6,-8,2,-1,-1,6,-3,-6,2,-3,2,7,-6,-1,8,-4,-7,0,1,5,4,1,2,-6,9,9,9,1,3,-3,9,1,8,4,3,-2,-5,-9,6,-6,-9,-8,-3,8,1,8,-1,8],[-4,5,7,-1,-2,9,7,4,-5,-6,-6,-1,4,9,3,4,9,9,3,0,1,3,6,-1,-4,-2,3,7,-5,-2,1,9,3,4,9,-9,6,4,8,-5,2,-2,-3,-7,-6,6,-3,-1,-1,-6,4,2,6,1,0,-2,-4,-7,-3,-3,6,7,2,2,3,-2,3,-9,2,4,-5,1,-4,7,-6,9,-7,9,8,4,-5,-3,1,-5,2,-6,-4,-9,2,7,6,-1,-1,5,-6,2,-9,1,-5,-1,5,0,0,-6,-6,1,1,-2,9,-8,5,4,7,9,3,9,9,-7,9,-2,8,8,-8,0,-1,0,8,9,1,0,7,-4,0,-4,5,-9,3,-8,9,4,-4,-6,7,6,6,6,-9,5,-4,6,5,-7,9,-4,-4,0,3,4,8,-1,0,-6,8,9,8,0,-9,-4,-8,0,-4,6,-3,9,-5,-1,3],[8,4,-1,-8,8,-2,0,-9,7,-1,-2,4,7,-1,7,-5,3,9,9,-2,7,-2,5,3,-9,-3,6,-9,-9,7,6,0,9,6,0,-8,-5,-1,-8,-9,-1,1,0,-5,1,-4,-2,7,4,-3,-8,8,-6,5,-4,2,-7,-6,-8,-2,-8,2,-3,-8,-7,-2,8,-4,-1,-2,0,-5,3,-4,-8,-9,-1,9,0,-3,9,7,-9,-9,-5,0,6,8,2,-6,2,-8,1,-1,-2,2,9,-3,0,9,-3,0,4,5,2,8,-7,9,-4,1,-2,-8,6,6,6,-4,2,-8,1,4,-8,5,9,-4,7,4,1,3,7,-4,5,5,-6,1,-5,8,8,7,0,0,6,5,-3,-1,5,5,-6,9,-9,-2,9,-5,-8,-6,1,1,-6,-6,-8,-4,-9,-3,-1,-4,3,7,-9,-2,-1,8,1,-3,-2,-7,-8,-1,-3],[-3,-9,1,-9,5,6,4,3,4,9,0,-8,-9,-6,2,1,4,2,0,9,3,-9,-4,-7,7,-4,-7,5,9,2,6,5,6,7,2,3,-9,7,5,3,0,4,0,6,1,1,5,-8,-8,7,0,2,-2,-7,-6,-2,-3,3,-8,-7,-2,-5,8,-2,6,0,-3,-4,0,-1,3,-6,3,-4,-2,0,0,1,9,8,-1,-9,-9,6,0,5,5,0,6,4,1,-5,2,4,8,7,-3,5,3,1,7,9,5,3,-3,7,-9,-3,-5,-4,3,2,-3,-3,7,-3,7,-6,-3,-7,-7,-3,-1,-6,2,-7,-2,5,5,6,9,0,-1,-9,-6,-6,0,-9,-5,3,7,-1,-4,6,-8,3,-1,5,-5,6,-7,-3,0,-8,-6,8,-9,8,-6,9,-1,-7,-2,3,-7,0,3,2,-8,-2,-3,-7,0,-5,-2,-1,-7],[9,-1,-8,6,7,8,-9,3,-4,-3,6,1,-7,5,5,7,-1,-4,6,-9,-4,2,1,0,0,2,-4,3,-3,0,-7,-4,5,-1,4,6,3,-1,-4,-3,4,2,8,-3,-3,-4,-8,-4,1,9,6,-6,-1,3,7,2,-8,-3,-9,2,-1,5,-1,0,0,7,2,-3,8,2,-9,9,-9,-5,8,-4,3,-3,5,-2,9,8,-2,8,-4,-1,7,2,6,-8,6,-1,5,-8,-6,-2,6,-3,0,6,2,2,-6,1,7,4,2,-9,6,3,0,2,-8,1,0,-9,-1,-7,4,0,9,1,0,-3,9,-4,-1,2,-2,-3,5,5,9,-2,5,-4,-3,9,-1,-6,-8,8,-8,3,4,-9,-2,3,-6,2,6,6,1,-1,3,3,-7,6,-2,9,-4,2,-5,-8,1,-5,9,-3,-2,-8,7,6,9,4,-7,5,9],[3,-6,2,-5,-2,3,-5,-7,9,7,-8,4,-7,9,-1,-3,-6,0,4,4,8,8,0,8,-3,5,8,6,-8,1,0,-8,-8,-4,-5,-5,-5,5,8,-5,8,5,-4,-9,6,-6,-2,-5,-3,-3,-4,8,-6,-9,-8,5,0,2,8,8,-7,-3,-4,0,8,2,-4,4,1,8,5,4,9,-4,-2,-5,6,4,-6,-9,4,9,-4,-2,9,-9,-3,8,-1,0,7,-3,1,5,-7,-9,9,4,-5,5,-3,9,4,7,-6,-3,-3,3,0,-8,-8,6,-3,5,-4,9,1,9,5,-9,-9,0,-4,2,-7,8,-6,-9,-6,7,1,3,5,8,-8,0,-9,9,-9,-5,4,-5,-6,2,1,-7,7,7,4,6,-5,6,-5,-6,3,1,-8,-4,5,4,9,-4,6,6,-8,-9,2,6,4,-2,6,0,8,3,-1,3,-6],[-2,-2,-4,3,-1,5,4,-6,-2,2,-1,4,5,-6,2,7,6,0,1,0,-2,5,9,3,-3,-3,1,6,-2,-6,5,-7,7,4,1,-6,1,-4,-1,0,-8,-7,0,-4,3,0,5,0,0,0,-1,7,-1,-3,-3,1,9,-6,2,8,3,-1,0,-1,4,0,4,-9,-8,-3,3,6,-8,0,-3,-3,-2,9,-3,-4,0,-3,7,9,-7,5,-2,-8,-2,8,-7,7,2,0,-4,-3,-1,-6,9,-2,-1,-2,-7,1,-9,5,7,-3,3,4,-3,-7,-7,4,-3,0,2,-9,-8,3,3,0,-3,6,-9,-6,-8,-8,1,-5,-2,-8,-5,-7,-3,-9,-6,-2,9,-1,4,-6,5,8,2,-6,3,6,2,-2,4,6,-1,-6,-3,3,-7,-2,2,3,4,-5,5,-8,4,-1,-5,3,9,-6,5,-5,6,6,-2,-6,-8],[5,2,1,0,7,6,4,1,-2,-8,1,-5,2,3,-3,8,0,-6,5,-3,-1,-7,-5,8,7,2,-1,-1,1,-7,-6,2,6,-3,-8,2,0,-7,0,-3,7,-8,4,5,1,6,2,8,-6,-5,-9,8,-3,-4,7,5,4,-2,-6,-4,1,-7,-4,7,9,-7,4,9,-9,3,4,7,9,-5,1,3,2,9,4,-7,2,-2,-3,9,3,-3,1,9,-4,-6,1,-5,1,9,-8,-9,5,1,3,3,-8,-7,5,9,3,8,7,9,-9,-4,9,-6,-3,9,7,7,1,0,8,-5,-1,3,-1,7,-6,6,-6,-7,7,1,1,6,6,1,6,-4,-6,0,6,-1,2,-2,4,1,-3,-2,-8,5,-5,-8,-4,3,-1,0,3,0,-7,7,8,9,-2,-5,3,6,-3,-3,5,4,2,-2,-9,-6,4,1,0,-7,-8],[3,-6,-3,6,4,3,7,-1,-5,-7,7,8,-4,4,1,-5,-4,7,-5,2,-6,4,-8,4,-2,-4,3,-5,4,-6,7,-8,9,-8,3,9,-3,1,-2,3,-8,-6,-7,-4,-5,9,-5,7,-2,8,2,3,-9,-1,-1,8,-2,0,-5,-5,-6,-1,2,9,2,-3,7,-6,-7,-5,-1,8,-6,9,-8,9,0,2,8,-9,1,-1,3,7,5,-3,-9,7,-3,-8,8,-1,-7,6,4,-4,-8,-2,9,1,-4,9,-8,-8,2,-1,2,4,-7,8,7,-6,-4,-9,-4,4,5,-8,-2,0,-5,-5,-3,5,7,6,-7,6,-8,-1,-4,-5,6,-3,7,5,4,3,-8,-6,6,2,-1,7,-6,9
,-6,-4,-3,5,-1,8,-6,-7,-2,-6,-1,-1,-6,7,6,3,4,4,4,-3,-4,2,5,-7,3,-1,-1,8,-2,8,-4],[5,-6,5,5,1,1,7,6,-9,7,1,-1,1,6,7,3,0,7,-6,1,6,-7,9,-1,-2,6,6,0,-6,-1,4,-5,-5,9,-3,5,-8,0,-3,-9,-7,1,9,3,-8,9,3,-6,-3,-2,3,6,-6,5,-9,9,-7,-7,-2,-4,-8,-5,-6,2,9,-1,-3,-2,-1,-8,7,2,-5,-5,-8,-7,9,9,5,1,-2,-8,2,-2,-7,0,4,7,5,-7,0,-7,-1,3,6,-2,0,9,-6,-1,-5,9,1,-6,0,1,9,-5,4,-3,5,7,-4,7,8,9,-2,-3,4,-5,4,4,-2,8,4,9,3,-9,4,-6,3,0,7,-5,-9,2,-7,8,-3,-6,-7,3,-8,9,-3,3,7,3,-7,-2,7,-1,9,0,-1,9,-1,-4,-2,1,5,3,-4,3,-5,3,7,5,-9,-3,-1,9,-7,-2,-6,-6,-5],[1,9,7,9,1,6,8,-3,-5,-5,-3,-8,-9,-6,-6,2,-6,1,4,-7,8,7,-5,-3,8,6,9,-6,-9,-5,7,-7,-2,-7,-1,-2,-5,3,-9,-6,-3,-7,0,-3,1,6,-4,-7,3,-7,-5,-2,-4,-6,5,-4,-7,-6,-3,7,1,4,-9,-2,0,-3,-3,0,1,-9,9,1,2,3,-7,-5,-8,8,4,7,1,4,-8,2,-6,-7,0,-4,-3,-2,-6,-4,3,6,0,-1,-2,-2,-7,-6,-3,-3,4,-5,-1,4,4,3,1,-1,8,-7,-7,8,-2,4,-5,4,1,8,-6,-7,-3,5,1,-2,3,9,7,1,-7,-5,-8,9,1,-8,-6,-3,-3,-4,-7,4,-7,-6,3,-9,1,-1,7,-9,-8,-8,-7,-1,3,7,2,-9,3,-3,3,-8,6,-5,3,-8,1,8,-2,1,-2,-9,7,-5,-4,-3,3],[2,-9,6,-5,-9,6,-2,8,-9,6,-2,-2,2,5,-8,-2,4,0,4,1,-2,-9,-1,-8,-4,-3,3,-8,6,-7,0,0,8,5,1,6,0,-9,-4,3,2,8,1,0,-5,-7,9,2,0,-2,-3,5,-1,2,0,7,0,1,0,3,5,2,0,6,-7,-5,-4,7,1,7,-4,9,-7,9,0,-8,3,-1,-2,-8,6,-1,-5,-8,-2,0,7,6,4,-3,5,-6,-8,6,2,-3,9,-7,6,3,-4,2,6,-6,-9,3,2,7,5,6,2,8,-7,-5,-2,-5,0,4,4,-8,9,3,5,-4,2,3,-8,0,9,7,6,4,0,7,0,-5,-8,3,-1,-9,2,-7,-8,-6,2,-2,-9,7,-4,-6,4,-5,3,-3,-8,3,8,-4,-9,3,5,8,0,1,9,-4,-1,8,8,-4,-3,-2,-4,9,0,-3,-9],[-6,5,-8,-4,-8,-9,-7,6,2,5,-1,5,-2,-3,4,-1,5,-4,-1,-3,-3,-5,-1,-7,-7,-5,1,9,-2,-2,2,2,5,-2,-7,-5,-8,1,8,-9,8,-8,7,-5,-6,-3,-5,-7,3,4,-5,-8,7,4,-1,-5,1,7,4,6,-7,7,-1,-2,7,-5,-7,-8,6,5,5,-4,-4,-3,-8,-4,3,1,9,-4,9,-4,2,-3,9,-2,-6,-8,8,-2,6,5,9,8,2,1,9,2,1,-8,0,-4,-2,0,-4,-6,3,7,6,1,8,-7,5,1,5,5,5,6,-7,2,8,6,5,7,-5,-3,0,7,-3,6,-5,-1,2,0,1,-6,2,-8,-7,0,-7,-6,-2,-9,-1,9,-3,7,8,4,-3,6,-8,4,3,-2,-9,-2,-8,3,-9,-5,0,5,1,1,9,-9,-8,-2,4,5,8,6,-7,-8,8],[2,6,9,5,2,-3,6,-3,8,3,-4,-2,-2,7,7,-5,2,-7,5,-4,0,-3,6,8,0,9,-1,-4,8,-4,-5,0,0,8,0,-4,7,-9,-9,6,-4,5,2,-3,8,0,-6,-4,7,1,0,-7,3,-8,6,-9,-8,0,-7,-9,-7,1,6,0,5,7,0,3,-8,7,3,-9,-8,-6,-2,-2,3,-2,-5,-3,-3,-3,6,5,1,6,3,-1,-2,-2,8,6,1,-8,-8,-7,5,9,1,8,-5,7,3,-7,5,9,3,1,8,-5,5,2,-1,6,1,3,-8,-3,8,6,9,5,0,3,6,-6,-8,7,-2,3,-1,-3,4,-6,7,5,1,-4,5,-1,-7,7,-3,-4,-5,4,-7,9,0,-2,5,8,8,4,9,-8,9,-5,-5,-9,-8,-3,9,8,-1,3,-5,0,2,-2,9,-1,-9,3,-1,3,-4],[-8,-2,-4,2,4,6,7,4,-5,-5,2,-8,5,-6,-9,0,3,2,7,2,1,2,4,3,-4,5,9,-5,1,-3,9,0,-2,4,7,-7,5,0,-7,-6,7,-5,-1,8,7,7,5,8,7,-1,1,-4,5,3,-3,6,-1,2,9,-8,-2,-1,1,-1,-6,6,9,9,-2,-8,2,8,9,5,-7,-7,2,-7,-8,4,1,0,-6,0,-1,-7,-4,-4,-6,-8,0,1,3,-5,6,9,-9,-6,9,-4,2,-3,-2,-6,-5,9,-5,-2,-7,-1,1,-7,6,-2,-4,-7,-9,-4,0,0,-9,8,5,8,4,0,1,1,4,1,9,0,9,-4,6,7,-1,4,6,-5,-1,-2,1,2,1,-7,-7,5,0,4,8,-9,-1,1,5,1,-8,-5,0,3,1,5,1,6,6,7,1,-9,3,6,9,-8,3,-3,6,-7,-2],[-7,7,-6,8,2,4,8,4,-4,9,-5,5,-3,5,0,0,5,7,-1,-6,7,-9,-7,-6,9,-1,-1,-3,8,9,4,1,-8,1,6,8,-9,7,-2,6,4,-8,3,-9,2,6,0,6,-2,2,3,5,9,-7,-5,-6,5,5,-3,9,-1,1,5,2,4,-1,2,-8,-2,8,-2,1,-9,-1,5,2,5,-5,9,-1,-3,-8,9,9,5,3,-9,-1,-1,-1,-6,-7,6,-4,-9,9,7,3,-7,2,7,7,-2,-4,7,-4,-7,-6,-8,9,2,-8,-6,-7,7,-1,5,-4,5,5,1,-6,5,7,-1,-3,0,-2,-3,-7,3,-1,0,5,7,-5,2,2,-5,5,-9,-1,-9,-7,-6,8,4,-2,-2,4,-5,1,-9,-6,-7,-3,6,-4,0,-6,-5,1,7,-7,-1,3,-7,1,-2,0,1,8,-4,0,1,-2,-4],[0,7,-9,2,6,9,0,9,2,-7,1,8,9,1,1,-2,9,9,5,-1,3,-1,-8,-2,-4,-9,-9,-3,-9,-3,2,-9,-2,7,-8,-2,-5,0,-5,-7,-2,-3,3,2,9,-1,7,5,0,8,9,-4,4,2,1,-1,-2,-9,0,0,-5,0,6,9,8,-8,6,-1,-6,-2,3,7,5,-4,-1,0,-5,-2,-2,3,9,-8,1,-1,-7,-4,-2,-7,0,7,9,4,-5,-4,-1,-6,2,1,2,8,-2,1,7,3,-7,9,2,5,-7,-5,-3,6,-8,-5,7,7,4,1,3,3,8,0,3,7,0,-1,1,-6,6,-8,0,-6,-8,6,-7,-8,-5,-6,-6,0,-2,-3,5,1,-8,6,-7,-1,-6,-5,5,0,-1,3,-9,2,-1,-8,-6,-5
,7,6,2,-9,-4,4,-6,1,0,6,2,2,6,-1,-4,0,-2],[7,7,4,-5,4,4,4,9,-9,-7,-7,0,7,7,-8,8,-8,6,-5,-4,-8,0,-9,-7,7,5,-2,9,9,6,4,-2,-2,0,-8,-5,0,4,2,8,7,-9,-2,-2,-5,-6,1,-5,-1,-1,5,8,-4,-4,5,4,0,1,-3,-8,4,-1,8,-7,-7,-5,5,4,-8,1,2,6,2,9,2,-6,-8,-8,4,-8,6,-5,-9,-4,2,-8,4,1,8,-3,5,7,-8,7,-5,-8,5,9,-9,4,-6,-6,-2,-7,9,6,2,-5,-8,-3,5,-2,-5,-2,6,6,-2,3,-5,-5,0,9,3,2,1,-6,-4,-7,1,-4,6,6,-4,3,-6,-1,9,-5,2,1,1,7,9,-5,-2,-4,2,0,0,9,3,-8,-3,1,6,-5,0,-7,0,8,7,3,-4,2,6,9,-4,3,-3,9,7,7,1,-7,-2,6,-5],[1,8,-3,1,-1,0,5,-5,5,-3,-7,-4,-9,-8,8,-3,-4,6,7,-4,-4,6,-7,6,2,3,-6,0,-5,6,1,4,3,1,-3,0,-2,7,-2,3,-3,-3,6,8,9,3,-3,-6,-2,-4,-9,1,-3,5,9,5,5,6,3,6,-1,-1,-3,-3,-6,0,-7,-2,1,-6,-7,-1,9,-5,4,-4,-7,-3,3,8,-8,9,-1,-9,-4,-9,9,-1,1,0,7,2,8,7,4,4,-7,-1,8,-3,8,-5,6,-6,-1,8,-6,5,6,3,-4,7,4,7,0,-5,-6,9,-6,-8,3,-4,-1,-5,-6,-8,2,2,0,4,8,8,6,1,-7,6,1,6,-6,-9,6,-2,1,6,-1,3,5,7,-5,-9,0,3,-4,-2,-5,-1,0,8,3,1,-8,-1,-4,3,-8,-9,-7,0,-6,3,9,0,-5,3,7,-3,-3],[1,-3,2,-9,5,-4,2,3,5,-5,5,6,-1,-5,8,4,-2,0,5,7,-4,-9,-8,4,2,6,-7,0,-2,5,-5,-2,3,0,-6,5,1,-9,5,3,-2,-8,2,-1,0,-1,-4,5,1,8,5,0,-7,9,-4,-4,8,-2,8,3,-2,-3,9,5,3,-3,-7,2,4,-8,-8,-8,-5,2,3,5,-9,1,-1,-4,-8,1,6,-3,0,-2,2,7,-5,9,5,-4,7,-4,-3,2,-2,-6,5,-5,-3,-5,-3,8,6,9,7,8,-3,7,5,6,-7,6,3,7,-6,-5,0,4,-4,-4,0,7,2,-3,1,-8,-6,-8,0,8,1,-4,8,6,4,7,-6,-2,-4,-7,-3,-7,-3,-2,7,-6,-1,5,4,8,2,-4,2,0,2,-2,4,7,7,-4,8,-1,-1,-2,5,1,9,5,-2,8,9,-2,-8,9,4],[2,-3,0,-3,2,2,0,-8,8,4,-7,8,-7,1,-9,5,-2,-8,4,3,-4,8,-1,1,5,4,3,-4,2,7,-9,-7,6,7,-6,-5,-4,-2,5,-9,-5,5,0,-3,-8,-2,-5,-9,8,-8,5,3,-2,-1,6,5,-3,8,5,1,8,8,2,9,-8,5,8,-9,8,6,7,-4,-1,1,-3,-1,5,3,-4,9,-9,-3,-1,-5,8,-5,2,3,-6,-4,-8,-8,-2,-7,4,-2,4,7,-1,7,2,-1,8,-2,9,-9,-5,-9,-9,8,9,1,-3,2,-2,8,2,-5,9,-3,1,-3,6,9,-7,-3,-4,7,-2,-6,-7,2,-9,5,4,-1,4,5,9,-2,7,3,-1,-5,-6,4,-1,-4,7,-9,7,7,-6,9,1,-8,7,-8,7,-4,-4,9,3,7,1,5,1,7,-1,2,5,7,9,2,-7,6,-6],[6,-8,-8,-4,5,3,-8,8,-1,6,-7,3,-8,-7,-5,7,-5,6,-7,1,-4,-3,2,4,-7,4,6,6,9,9,6,7,-5,7,-6,8,-7,7,4,-4,8,7,-3,0,-1,3,-5,1,0,-9,6,-5,-4,-4,5,-9,-7,-2,-6,-4,-1,0,-9,0,-8,-5,-6,-8,0,9,-8,-3,-4,-7,3,-7,2,-3,0,9,4,9,-6,9,-7,-8,4,-1,9,7,6,-7,9,-5,1,1,-4,-1,9,3,9,-3,5,7,1,2,-8,-8,2,8,-4,-6,4,3,6,-5,6,-6,4,3,6,9,0,7,1,-5,-4,-6,2,9,3,7,-5,-4,0,3,-5,3,7,5,-6,5,5,9,6,3,0,3,1,5,8,-4,-7,-6,0,7,5,1,6,-5,2,-2,8,-7,-6,-6,-8,8,4,-8,1,-3,6,-1,1,1,-7],[5,7,6,-1,-1,0,-9,6,6,-6,-1,-2,-9,-2,6,-9,8,-2,7,-4,-3,5,1,3,6,8,-2,2,5,1,-9,7,-2,-9,5,-5,1,-3,-7,-3,1,7,-2,-3,0,-1,-9,-7,9,5,0,4,-8,6,-4,0,-8,6,9,8,-6,-5,-3,4,1,5,5,-5,2,9,4,4,5,-3,-5,8,-7,8,7,9,-5,-9,5,3,-2,-3,8,5,-5,3,5,2,-6,1,2,8,1,3,-8,-6,-8,4,-5,1,-8,3,-6,4,7,9,-9,3,8,1,-8,-1,-2,0,-2,1,-8,6,1,3,-8,6,2,-5,3,7,3,-6,3,-8,-3,-8,-9,4,5,-5,-6,7,-2,0,2,-7,7,5,2,1,-6,0,5,1,6,-2,2,2,-1,8,-6,6,2,-4,6,-1,8,5,4,6,6,5,9,6,-5,2,-4],[-2,5,7,8,-1,-5,-2,2,-2,-6,-3,-9,9,-7,-4,-3,1,-7,-9,-9,9,-5,3,2,3,-3,-8,5,4,9,-1,9,8,-5,8,4,-2,-9,1,-2,0,8,6,-6,8,-5,1,-1,6,1,-5,-4,-5,8,-2,-1,1,6,3,-5,6,-2,9,3,-3,2,-8,8,9,2,-9,-1,9,-6,-5,-4,4,-4,-1,5,3,1,7,-1,2,2,2,7,-9,-9,9,-6,4,-7,-7,7,-4,0,-6,7,9,5,2,-5,2,0,-8,1,-8,-9,2,-5,-5,7,-8,6,-4,-7,7,8,-8,-7,-3,-7,-2,-4,-9,7,0,-3,-1,8,-2,4,-2,2,2,8,-9,8,0,-6,-7,-5,-3,-9,-6,2,1,-5,-6,0,-4,5,-7,1,1,-9,-2,4,-5,-8,-3,4,-8,6,2,4,-4,2,9,6,-1,1,2,5,-7],[8,-4,-4,5,-6,-8,9,-6,-4,4,-5,9,7,3,8,5,8,4,4,3,7,3,-8,-2,5,-7,-5,4,-5,2,-2,-4,-4,4,-8,2,-5,9,-4,-8,-9,4,-6,-2,-8,1,3,7,-1,9,0,1,8,-6,7,-1,-3,7,3,-4,-3,-5,2,1,-8,2,-3,2,-5,9,-8,-8,4,-9,-7,-4,1,-1,-5,9,9,-4,4,-1,-5,-5,-3,1,3,5,2,3,-4,7,-6,3,2,-2,2,-9,-7,1,-5,-8,7,-1,9,-4,-1,-6,-4,1,-5,-3,4,2,-5,-9,3,-5,7,-1,-8,-4,2,6,-5,6,1,0,7,2,7,0,-7,-7,-7,3,5,8,-8,-7,7,-5,-2,-6,-6,7,0,2,8,-3,4,9,4,2,1,-3,-9,6,0,-1,5,-7,-1,8,9,-1,7,5,6,7,5,-5,7,-3,-5],[-5,1,-4,6,-9,2,6
,5,6,0,-6,7,3,-6,-7,9,-4,-9,-8,-9,-1,3,-6,6,0,7,-5,-2,-5,0,1,9,5,5,3,1,-1,0,0,-9,7,9,-8,8,0,-9,-5,6,9,2,-6,0,6,4,7,-6,-8,0,6,-8,9,0,2,1,3,4,-4,-9,-7,-4,4,4,1,-1,9,-4,-6,2,-8,0,-3,-8,8,-8,-1,4,7,1,-7,0,1,2,0,-6,-9,-7,2,-3,-5,-1,5,9,7,4,1,9,7,8,-5,7,-7,-7,7,0,-7,-9,9,-8,-6,-9,-1,8,7,-1,-6,-9,9,6,-7,-7,0,-3,7,-9,-9,6,4,-2,-2,6,-2,-5,-2,-8,-4,5,5,7,-7,6,9,-7,9,9,4,6,-6,2,-3,-6,-3,2,1,5,5,-1,-5,-7,2,-4,5,8,-9,-3,3,2,-4],[4,-5,-4,-5,-4,8,1,6,3,3,5,4,7,7,8,3,-5,2,-3,-1,-4,4,6,4,9,-1,2,-6,-9,5,6,6,5,1,4,-5,-5,4,-5,2,0,-9,7,-6,-4,9,5,-3,4,-5,5,9,-5,6,6,5,-9,-3,7,-6,5,7,8,2,6,8,-9,-9,4,-1,-8,7,-7,4,-4,-2,-6,5,-5,3,-7,1,1,-2,6,4,4,1,9,-1,9,-9,9,8,1,-4,-9,1,0,-7,8,-6,9,-2,3,0,6,-4,-3,-2,-6,7,-4,-6,-2,4,-8,-8,-2,-8,8,0,-3,-2,-1,0,0,6,-1,9,5,-6,2,-1,-2,-5,2,3,-8,-3,6,-1,-2,0,3,3,7,7,-8,-5,0,-6,-5,3,-8,7,-4,3,-7,-1,-2,-2,-3,9,-3,5,-8,-9,7,-2,6,-8,-9,2,-5,-2,3],[1,4,5,3,3,3,-2,8,6,-6,-1,8,4,-8,-3,-3,9,9,0,-6,3,0,0,-7,6,6,-4,-1,0,8,9,9,3,-3,-7,-7,-4,7,0,-1,4,3,5,1,9,-8,0,3,4,9,-1,-9,3,-1,-5,6,7,0,3,6,6,8,2,7,-2,9,8,0,-3,9,3,-3,-8,7,-8,9,8,-6,2,-6,7,5,8,1,7,-7,-5,7,-5,0,2,3,4,-1,2,0,6,-6,-2,-5,-1,-7,6,1,-4,7,5,1,1,1,2,-2,7,7,-3,1,-3,5,-7,9,-4,6,2,9,7,9,-7,-4,9,4,9,-3,-5,5,1,-3,5,-6,8,2,9,2,7,0,-9,-9,-8,-1,9,-2,-8,5,-1,9,-5,-5,7,6,6,4,-8,0,4,3,-7,-8,7,-6,-6,-1,-1,-3,-6,-4,9,8,5],[-2,1,-8,6,4,7,4,-3,3,4,5,-2,-3,9,3,2,3,8,-6,4,6,8,7,-3,3,-1,1,1,1,-8,5,8,1,-8,-5,-9,6,0,1,-3,8,-6,6,8,5,9,-8,8,-5,-4,5,-3,9,-5,-1,-6,4,-5,-6,-4,-7,7,9,-5,4,3,-9,5,-6,3,-8,-5,2,-9,5,0,-7,7,-3,-1,0,6,5,6,-6,-6,-1,-2,9,2,2,-9,6,-3,-1,-1,-9,-8,7,2,-9,-7,7,-1,7,-1,-7,-6,-7,-1,2,6,-9,-3,1,6,-3,4,8,4,-3,6,0,-2,2,-7,-3,-2,7,1,-5,-1,0,-3,5,-2,-6,-2,9,-5,-2,-4,-2,-5,-7,-7,6,-2,6,4,9,-7,5,6,-8,-6,0,-4,-1,-5,3,8,-1,5,-7,-2,6,8,2,3,-7,-7,6,4,-4,0,1],[-1,-2,-1,5,2,5,4,5,9,8,6,-8,-8,-6,1,-8,-1,3,-1,6,-8,-6,-7,-2,-3,-1,8,-8,-9,-8,-2,-3,0,-6,-2,8,-4,5,-4,-9,9,-4,2,5,8,-1,-5,5,-7,0,3,1,-1,6,-2,0,7,-1,7,-4,0,-2,0,1,-1,-9,7,-2,5,-4,0,5,7,-9,-8,8,-6,-1,-6,-2,4,-1,-7,6,2,-4,-4,-7,1,-6,-7,0,0,-9,-5,1,9,5,2,-9,9,-7,-7,1,7,-4,-2,5,-8,-5,3,7,0,-5,1,-3,4,-6,2,7,2,-3,-2,3,-8,-6,3,-6,-7,-9,9,-7,-5,0,5,7,-3,-7,-1,-5,-4,0,3,-5,2,-4,-1,-6,3,-7,3,-2,1,-4,-7,4,-4,8,1,3,-7,8,8,6,3,1,-7,-7,9,9,8,9,-1,8,-7,2,-2],[8,-6,7,-6,2,-6,2,9,8,3,1,5,7,8,-5,-3,-6,-2,-4,4,5,5,6,4,-8,-2,6,5,-7,-6,-6,-4,3,-7,2,-7,-6,9,-6,-9,-2,-6,-8,7,0,-5,9,-8,-9,5,3,-4,4,-3,9,-5,-2,-8,-4,-6,-6,4,-3,3,4,-3,6,6,-5,5,6,6,-8,-7,-8,7,3,-3,5,-4,1,5,1,-7,8,-3,3,-3,6,1,2,-9,9,-4,-4,8,-4,7,2,0,-2,-7,6,-4,5,9,-8,5,3,-8,-8,-5,0,-1,-8,3,-8,5,-8,7,-1,-7,8,-6,-4,-2,3,-5,5,1,9,7,3,6,-3,6,6,-2,-5,-8,-2,7,-2,-6,8,-3,2,8,-5,6,8,-7,8,4,-6,4,4,2,6,7,-8,4,-9,-7,5,-1,-1,-7,8,9,5,7,-8,-7,2,5,7],[5,-1,3,-5,-3,9,-6,-5,0,4,-3,-1,-2,3,-6,7,-8,-2,-6,-6,5,-1,-2,1,-8,0,7,-5,4,-5,8,3,7,3,-6,3,3,1,7,-5,0,8,4,-2,-8,-3,3,3,-7,-4,-8,0,9,-2,0,-5,-1,-7,4,-2,-2,-6,6,5,1,9,-1,5,4,-5,-6,4,-7,0,2,4,-8,3,-5,-2,7,5,0,-1,-2,-5,-4,7,-8,2,-1,5,-8,1,-9,5,-7,0,-4,7,-5,-5,7,4,-2,-2,-8,4,9,3,8,2,1,4,-2,-2,8,8,2,0,-8,9,-8,-5,-2,-2,3,3,6,6,-3,-2,-1,4,8,-5,-5,-5,-4,-1,-8,-7,-9,9,-3,2,4,-1,-9,-2,1,-3,0,-1,6,-5,-2,5,7,1,-5,8,2,0,-1,-4,-2,2,-3,1,-7,-3,-6,2,4,0,2],[7,-3,-3,6,4,5,-1,3,6,2,2,-8,-5,1,6,9,3,0,3,-2,1,0,-6,-3,-2,1,5,5,-3,-7,5,-7,1,-3,9,5,-9,0,-7,3,-4,-8,-8,-8,7,7,8,-4,3,5,5,-5,7,6,8,-9,9,8,9,-5,-8,8,9,-1,-3,1,5,-8,-6,-4,9,8,-2,5,2,-3,-6,-2,0,-8,6,-2,-1,9,-5,-7,-1,8,2,-5,6,7,4,-6,3,9,2,7,-3,-2,-9,-6,3,-8,4,5,-6,-8,-3,7,-3,0,9,-5,-4,3,7,6,6,-2,9,-6,8,6,6,-7,-7,-8,-8,-5,-9,3,0,-7,2,1,4,6,5,3,-1,0,5,9,3,-8,3,-6,2,-5,9,-7,5,6,-2,7,-1,8,-7,-7,8,-8,5,-8,0,3,2,3,4,0,8,4,-4,4,-5,-5,-2],[6,8,-1,6,-2,6,2,5,-1,0,8,-4,-1,-5,-3,8,8,2,1,8,8,9,-6,-1,5,-5,7,-7
,9,-5,8,-5,1,3,3,-2,5,3,3,5,5,-9,-6,4,-9,-6,4,-5,7,5,-4,2,8,3,8,-8,1,-2,3,8,7,-1,2,-4,-6,-8,-3,2,1,-9,-9,-2,-1,6,-1,4,6,-2,-3,-6,-3,-6,-6,-7,-1,7,-3,-5,4,-4,-5,-1,-3,-3,5,-2,-8,9,-1,6,-6,-1,0,8,-3,6,-4,-8,5,0,9,3,0,-1,-8,-7,8,-1,4,9,5,5,3,8,5,3,6,6,-7,-7,-3,9,-5,-8,-1,5,-4,-6,-2,-1,7,-4,4,2,-2,1,-2,2,6,5,6,3,-1,1,-1,-9,-3,5,1,9,-2,-9,-6,6,-8,1,-2,5,-8,8,-9,6,-8,-6,-2,5,-9],[6,2,8,8,7,4,5,3,5,9,9,7,-2,-5,6,0,4,9,5,4,4,4,8,2,-7,1,0,7,0,-3,-1,8,3,4,6,-6,0,8,2,6,4,0,-7,-9,-5,-6,-1,9,-4,8,7,-4,-3,-6,-2,7,3,-3,8,8,6,-7,9,-4,1,8,-9,-2,5,4,-2,-8,4,9,3,8,4,-2,3,3,2,-2,6,-8,-2,6,0,-5,1,8,-2,7,6,2,2,6,3,-7,-3,-5,7,2,-1,3,1,-5,-7,-2,-9,5,3,-8,3,6,-9,-7,1,5,9,6,-4,-3,-9,1,-1,1,-7,-1,-7,0,-1,4,-3,0,1,-2,-6,2,-5,-1,-4,5,9,-6,-9,-8,6,-3,8,-6,-2,1,9,8,8,-3,-6,-6,-2,8,-4,-1,6,-8,-7,5,7,7,7,0,3,1,3,0,3,4,6],[-2,-3,-2,8,2,6,-3,-2,-4,-5,-9,-4,9,9,0,-1,-3,-9,5,1,-4,9,2,-9,-7,-7,3,-5,6,1,4,3,-5,-3,-1,2,4,1,-5,-7,-7,-5,9,4,8,-5,1,8,3,2,-6,8,-4,-3,4,1,9,9,0,-2,4,-3,-7,-2,-7,2,2,-4,1,1,0,1,-8,-6,8,1,3,0,-8,7,2,-3,4,3,7,1,-4,-7,0,7,-7,3,-6,2,2,-4,9,1,-8,-2,-2,9,5,-5,-4,6,-1,8,-7,-2,-7,7,-8,9,0,-4,-6,-2,6,-5,-1,2,-4,9,-4,8,8,3,-6,-6,-2,-9,-2,-9,-6,2,3,1,-4,7,-8,8,-2,2,9,-7,-2,-3,6,9,-3,7,-3,-8,-7,2,6,-2,1,-2,5,-1,1,1,6,2,9,3,-8,7,3,6,-4,-7,1,1,7],[-6,5,6,-3,8,-5,0,-1,-5,8,4,-2,2,9,5,3,9,-4,-8,-9,1,5,-1,0,-1,-7,4,6,3,5,9,5,3,4,0,0,-4,4,4,-2,3,-5,-9,2,-1,5,-8,-5,2,9,-1,-8,1,4,-6,3,-4,2,5,-1,3,9,-4,-3,3,8,-6,4,-5,-3,4,-1,1,-4,-7,4,-4,6,-1,-6,8,-8,2,5,4,4,-4,9,-5,-5,-5,-1,9,4,-5,2,-2,2,8,-7,1,6,-4,-5,-4,-5,-6,6,-8,-9,0,-8,-5,1,-6,-3,6,0,7,-2,-6,0,7,3,3,5,0,-5,-5,7,0,2,-8,2,2,-3,-3,2,-7,-9,-1,1,-8,-7,-8,0,2,-1,5,3,5,-1,-8,-9,3,-1,5,4,9,-5,-4,-7,-6,4,8,-3,4,-9,-7,-1,1,0,-7,-1,-2,-3,6],[9,0,-2,-8,-4,7,5,-2,-9,8,-7,8,3,3,-3,0,4,-2,-9,0,0,1,7,2,1,8,9,-1,0,-1,-1,-6,5,1,4,-8,-2,2,4,-8,-3,-5,-9,7,1,-4,4,1,2,6,-6,2,-2,-6,9,-3,-2,-2,7,-3,-4,-3,-8,1,-6,9,5,9,1,6,9,5,8,-4,-4,8,0,-6,-9,-4,8,0,-4,-1,6,-9,3,3,4,1,-4,-9,-4,-3,6,-4,2,-9,-3,-3,8,-1,2,5,2,8,0,9,-7,7,-9,3,-1,-9,6,-4,7,-8,-7,-7,0,-3,-3,5,3,-6,-3,-4,-4,6,-6,-7,7,3,-1,5,-9,-6,4,4,3,5,-7,0,6,6,-1,4,-7,1,9,-2,-5,8,-2,-7,-8,-5,-5,-9,-1,6,3,0,-1,-4,-7,6,-3,-9,6,5,-4,8,-4,1,6],[0,-6,-4,5,9,4,-2,-7,-9,-6,9,-6,2,-5,5,-2,9,-6,8,8,6,3,-1,5,4,7,-8,-4,-3,1,9,-6,9,-3,-4,9,-5,-6,-7,7,7,9,-6,4,1,5,-3,-9,9,1,-3,-8,0,7,8,0,4,-6,0,-6,-5,1,-6,1,0,6,3,7,-7,-8,9,5,1,-8,-1,-1,-5,-7,6,-5,7,-8,-7,5,-6,-4,-8,-3,-7,0,-5,-7,2,-8,9,-8,3,-5,8,-5,-7,8,4,-8,-5,5,4,-2,1,-6,5,9,3,2,7,0,0,-1,-7,6,1,5,-5,-3,2,9,-8,-9,-1,2,-7,-4,0,6,8,3,3,3,-1,-8,-6,-9,4,-8,-9,-8,9,-6,4,9,7,-6,-8,-1,4,-4,1,0,-4,-1,2,4,-7,0,-2,3,4,5,7,3,5,-6,0,-7,2,9,-4],[-4,-4,-5,-1,-8,-5,3,-9,1,-3,-6,-1,8,-5,-9,-1,-3,-8,-1,2,-1,-8,-6,-6,-5,4,7,-3,7,6,-5,-8,8,-3,2,0,0,7,-2,7,-4,6,8,8,9,-1,-9,1,-6,-3,3,-8,1,2,9,-4,-4,-1,1,-3,-9,5,7,-8,-2,1,7,6,3,5,-5,-9,-8,5,1,-5,-7,-7,0,-3,-2,-7,1,-5,-2,-1,5,0,5,7,-8,3,-7,8,1,-6,4,4,-7,-1,-2,2,-8,4,-2,-5,7,-3,6,7,0,9,-1,7,3,-2,-9,-6,-3,-9,4,2,-1,-7,8,6,1,-1,-7,7,7,1,3,-2,-6,4,4,8,1,-9,-2,-5,-8,3,-6,2,4,-8,3,-1,-1,2,2,0,7,1,9,-2,-1,1,-6,0,-8,1,8,-9,3,-2,8,6,9,7,-2,-6,-3,0,7],[-1,-1,5,-5,-5,7,6,-2,3,5,0,-8,-8,1,-7,-2,8,8,3,4,5,-1,1,8,5,-3,9,5,3,-2,-3,7,-8,3,3,3,4,7,-7,-8,6,7,-2,6,2,2,5,7,-5,-6,5,-1,-1,0,6,8,-2,-1,-2,2,6,-8,-2,2,-3,9,-7,6,4,7,0,8,8,4,8,7,7,0,4,6,3,-3,-5,7,-1,-9,8,-4,-6,3,9,8,-9,-2,-8,-9,8,8,-9,9,-6,7,2,-8,-5,-7,-2,-6,-5,-6,7,-8,2,-5,5,-6,4,5,-7,9,4,-4,8,0,-3,-7,-5,-8,-6,8,4,5,-4,-3,-8,-4,4,-4,6,-3,-2,-1,4,-7,-1,-2,-4,-4,5,-4,0,-7,-1,8,-5,-1,1,8,5,-2,-1,-8,6,-6,-7,2,-9,-1,9,6,0,-2,-3,-6,-7,5,-6],[3,6,2,-7,-4,5,3,6,-6,-3,0,6,3,1,5,-4,2,3,2,3,0,7,-4,2,3,-1,-6,9,3,-1,-9,6,-1,-4,-7,6,6,-7,-3,8,-9,-5,2,6
,-1,7,0,2,2,-6,4,9,-3,3,-4,-2,-5,-9,-4,3,1,1,-7,0,2,7,4,3,1,-4,7,3,-7,-9,-9,-9,-4,6,-5,3,-7,-8,3,-8,7,-9,-1,-6,-7,3,8,1,6,-7,-7,0,-5,5,3,5,3,-2,3,-2,-7,-7,2,2,2,4,8,-4,4,-2,3,4,-3,-1,-3,0,1,-1,-7,-2,-4,5,-5,-7,-3,5,8,-4,5,1,2,4,2,0,-4,5,-8,6,-1,-4,-6,8,-2,4,-1,-3,-2,-3,2,-8,-7,4,4,-2,-4,-9,9,-1,8,-8,-3,-9,-5,-5,-4,-7,-5,-5,2,-3,3,3,-6],[-5,-7,4,-8,7,3,2,2,-2,4,3,5,8,-6,6,4,7,5,-7,8,7,4,-9,0,-7,-9,3,-8,-4,2,-6,-6,-1,0,8,-7,-2,-1,-2,-3,6,8,-9,8,-4,-6,1,-2,-1,-1,-2,-3,-5,-7,-7,-4,-2,-1,1,-9,8,7,2,5,5,4,-2,-6,-5,9,-3,7,-9,3,-5,-6,7,4,-2,9,2,-8,9,-8,0,-9,7,1,6,-4,-6,5,-8,-7,5,-9,-8,3,8,-8,3,5,-7,-9,-6,-3,3,6,0,3,6,1,6,5,-2,-9,-5,-3,-1,-9,8,-7,-7,7,-8,8,-6,-7,0,-4,-3,-6,-9,-1,2,-3,7,6,-9,0,-4,-5,1,5,7,5,-1,-6,-8,-6,1,-1,-7,-2,0,-3,5,-5,6,1,-3,6,-7,9,6,-4,3,-3,8,8,9,9,-7,-7,9,5,2],[-4,4,-9,-2,0,-1,0,9,-3,-5,0,-3,0,6,2,-7,9,6,-1,1,-3,8,0,-6,1,4,-4,8,6,-9,5,8,8,-6,-7,-6,9,8,-8,4,2,9,4,-3,8,6,6,8,0,1,4,6,-4,-4,-4,-6,9,-7,3,-5,4,-7,-7,-9,6,9,8,-3,7,6,-2,5,8,-2,-5,8,-6,4,-6,3,-7,-9,3,-8,3,-8,4,-3,1,2,-8,5,6,7,8,-5,7,5,-9,-7,-3,5,-5,-6,1,-5,7,2,8,-6,-4,-7,6,2,-3,4,-6,-9,-5,-9,-1,-8,0,9,6,-9,-8,6,-4,-4,-6,8,9,-5,1,-9,8,4,2,-6,-7,0,-8,2,-9,-4,9,5,1,-6,-5,-6,-3,-4,-1,0,3,-3,9,-9,-7,6,-9,3,2,2,2,-5,-6,-9,7,5,-6,-5,9,-1,-4],[-2,9,8,5,0,6,9,7,-2,2,-6,1,-3,-6,2,6,-5,-1,-7,6,-8,7,-5,-1,0,2,-5,7,7,5,8,-8,-1,9,-7,5,-7,-2,-1,9,1,0,0,2,-9,6,-8,6,8,-1,5,-3,0,5,6,7,9,-3,3,1,-2,-8,8,8,-3,7,-1,0,-9,5,-7,-8,-7,4,7,-8,9,1,2,5,4,-4,-3,8,2,-4,0,4,-4,-2,7,8,3,7,2,-5,6,-3,9,-6,-6,3,-1,-5,6,1,-1,4,-4,3,9,2,-1,3,-5,-5,7,9,-8,-2,-1,4,-6,-8,-3,-3,1,-1,-8,3,-6,5,-9,1,-5,-3,-1,4,-8,6,9,-5,-2,-9,-6,8,-5,5,-3,3,-2,-5,8,-2,-6,-7,0,-2,-7,4,8,4,-6,8,0,-8,-7,-6,4,7,3,4,-6,6,5,6,8],[-2,-6,-9,5,1,-3,9,1,2,2,7,7,6,-3,5,3,-4,-5,-7,-9,-7,-1,1,-2,-6,-8,3,1,9,-2,1,-8,2,-3,3,4,1,4,-5,-6,-9,5,-8,-6,-8,-4,1,3,2,-2,-9,6,2,9,-9,6,-3,4,8,-8,6,6,-8,-9,-4,-1,4,8,-9,2,-2,2,-4,-9,-1,-3,5,5,-4,-7,4,2,-9,9,6,8,-9,-6,9,7,9,2,5,-7,3,5,-5,-2,-5,9,-9,-4,-9,-3,-7,9,8,-6,5,0,-1,2,-7,-7,3,9,-5,5,-8,7,-7,-1,7,8,9,-3,0,-8,5,-7,0,-7,-2,3,-1,2,0,-6,6,-9,2,-4,-2,7,-1,9,6,-6,0,-8,-4,5,-1,-9,6,-4,-8,-1,0,-6,6,3,8,-6,0,2,-7,-5,-1,1,-4,1,-7,-9,-2,9,-3],[2,5,-3,5,1,9,2,1,0,7,5,-7,-2,-7,-1,-9,0,4,-7,2,2,9,-6,-5,-7,-2,-4,-1,7,7,8,-6,6,-6,1,-9,1,-8,7,-2,2,0,6,4,4,9,1,-6,-7,6,-3,-1,0,4,1,-2,-1,7,1,-3,-8,-9,6,-7,5,-1,2,-3,9,4,2,7,3,2,-6,-6,-8,0,2,-2,2,1,1,-6,6,-3,-3,6,-8,-9,2,0,2,-1,-8,-8,-6,4,-4,-7,6,-6,8,9,6,5,-6,7,7,0,-2,-4,-9,-5,-2,1,-3,-8,5,-8,2,5,0,-9,6,3,3,2,-5,-6,-9,8,-3,9,-8,-2,5,-3,-6,2,9,1,7,9,7,-2,2,6,-6,-2,-7,0,2,9,-3,-6,-8,-6,4,3,-2,7,8,2,5,8,6,2,6,0,2,1,6,3,2,3,0],[2,-6,-9,1,-4,-6,-5,-2,3,-2,-8,9,-9,-3,-1,0,1,-1,1,4,-7,-9,-5,6,1,3,-3,3,-7,0,6,7,8,-4,-1,-3,-9,-9,8,7,2,-5,4,7,-6,3,5,-3,2,-7,-9,3,5,-1,1,2,3,-8,7,-9,5,1,-7,1,6,0,7,6,9,-8,4,6,-4,-4,-1,-9,5,-3,-3,0,-4,1,4,-7,0,9,5,5,-9,-2,8,9,8,-7,-1,9,-5,7,-5,5,-2,-5,-6,2,-6,6,6,4,4,-9,-6,4,3,-4,-4,-9,3,1,0,5,-6,-2,-4,-7,-1,-9,-9,-9,-4,9,-5,-5,-7,8,3,-3,-8,8,9,-4,8,8,-5,6,-5,4,6,-6,7,-4,-3,6,-9,6,-2,-6,-5,-3,8,2,-4,-7,4,-7,7,-2,6,-1,1,-8,-6,0,-7,-8,-4,0,3],[2,3,8,4,0,-4,3,5,1,5,-6,-6,5,4,8,6,4,-2,-2,-7,1,-7,-2,5,-4,-7,9,7,3,9,2,5,-3,3,9,-9,7,2,4,7,-8,-4,6,0,-7,-5,5,-5,9,-4,-8,-6,-3,-1,-9,2,7,-8,-9,7,-5,1,-6,4,9,8,4,-8,1,-4,-6,-4,-2,3,4,-4,3,-7,1,8,3,-5,-1,-7,0,-3,6,1,-7,1,-2,1,-3,0,-4,-9,-5,-3,-2,-4,0,9,-5,7,9,-1,2,9,-9,-1,-2,-4,9,4,-6,2,2,-2,5,4,-7,-7,-8,4,-3,-1,-7,-4,-1,-7,-9,4,-3,7,-8,-8,-2,1,-8,3,-5,2,6,0,-9,-3,4,2,7,-2,3,5,9,5,-6,-8,-4,8,-3,-3,9,7,-9,6,2,8,0,-7,8,-6,1,2,-3,3,2,4,5],[-7,4,6,7,5,4,7,3,-4,-1,-1,-5,0,-2,-6,-1,0,6,3,-9,2,-7,-2,9,-1,0,9,7,8,-7,1,9,6,3,8,-2,8,7,8,-6,-5,4,7,-8,4,2,2,-7,5,-6,7,7,7,-5,-7,
5,-5,-3,-6,-3,-2,-9,-8,-6,-1,-4,-3,3,-4,9,4,-4,4,-1,-3,-6,6,2,-2,-8,0,3,2,-4,-5,-2,-3,7,0,-6,-9,-4,-2,1,0,-7,6,2,-5,0,-8,-6,0,9,-6,6,1,-8,1,-8,-4,5,8,9,2,-5,9,7,-3,-7,-3,8,7,-1,5,2,7,4,-2,1,-9,6,-1,0,-3,1,-9,-8,-9,6,0,-1,9,-1,-4,9,-8,-7,-6,-8,-7,4,-6,4,-3,-2,8,-7,4,3,-3,3,-8,-8,-2,7,-8,0,-5,-8,0,2,-1,3,-9,0,1],[3,4,7,-8,2,1,0,-7,7,-6,-1,-1,-9,9,-5,-3,0,3,3,-5,5,-7,7,2,9,-4,-2,-8,-3,-8,-8,-5,-5,-6,0,2,4,0,2,8,8,6,4,7,7,-7,-7,-9,-8,5,-8,8,8,-9,-3,5,3,-4,-2,3,-7,2,-1,6,4,2,-1,-7,0,4,-2,6,0,6,4,2,-9,5,2,-2,-7,0,8,-5,4,4,-6,8,0,-4,-2,-1,-5,6,-3,-7,8,-8,8,-3,-8,-5,8,0,-2,-8,-9,-6,7,7,-4,-3,8,-7,9,9,-3,4,-7,3,9,9,-5,1,6,-6,4,-2,2,-9,7,-5,4,-4,6,6,3,-6,1,-6,7,7,-7,-2,1,1,6,6,7,4,2,8,-6,0,2,-3,-3,1,1,6,5,1,8,-5,-4,4,-3,9,-9,0,-1,-3,9,2,-1,-4,-4],[6,7,9,2,9,-7,3,8,-3,-9,-8,1,1,6,-9,6,5,4,9,-4,-8,-2,1,-8,3,9,3,0,4,-4,2,4,-8,-3,9,3,-9,2,-8,6,5,2,2,-6,-1,9,0,8,1,-9,-2,-4,1,-5,-2,-1,5,4,-1,4,-9,-9,-5,2,5,0,5,-7,-4,-6,-9,7,-8,-2,-7,2,-1,8,2,4,7,-7,-9,4,4,7,-6,-8,-1,-1,-9,-1,-3,3,-1,-3,-3,-3,8,2,-8,-5,-6,3,3,0,-3,6,-6,3,-2,8,4,-8,2,-5,6,-5,3,1,5,0,7,-6,4,7,-2,4,3,1,-5,-4,-7,3,-8,1,-1,-8,3,-7,-9,-4,0,0,-5,3,4,-5,0,3,-1,-3,2,8,7,7,-4,-5,9,6,7,-9,2,-7,4,-2,-6,4,-4,-5,-5,8,-8,-9,-7,7,2],[-8,0,-7,5,-9,-8,-3,4,3,4,1,-6,-3,8,3,4,-9,5,9,6,2,-5,2,-2,8,0,-3,9,-6,-6,-2,0,-3,0,-9,-9,-8,3,-8,5,2,-8,3,-4,-6,-9,0,3,6,-4,-8,-5,-4,3,4,7,-3,5,-8,-2,-4,-6,-5,2,3,6,-8,0,5,3,6,4,-2,7,-1,-7,0,-3,3,-3,-8,3,-9,-6,5,-4,-5,-3,-3,-2,5,-6,0,-6,-5,2,8,2,1,0,8,-5,-5,1,-5,-5,-1,-2,7,6,-5,-7,-9,-3,-7,8,-9,-2,4,-1,-3,-8,2,7,-2,9,0,-4,-7,-5,-6,-5,2,-5,7,6,0,1,6,4,-2,-4,-2,-9,5,8,3,5,-1,-7,5,-1,-3,-3,1,-5,-9,-4,-8,8,8,7,-5,-1,0,5,1,-5,-9,-3,-7,-3,-6,5,-2,2,-4],[6,-4,-3,1,-1,-2,-6,-8,-3,-9,9,-4,-8,5,-4,-5,1,-6,0,-6,-7,7,-1,-8,-4,-8,6,-1,0,-6,-2,-7,-7,4,9,-5,-4,4,7,5,0,-1,-6,8,4,6,0,-8,7,0,9,3,-5,1,-9,3,4,-4,-1,5,-2,6,6,-9,5,-8,7,7,3,6,0,-5,2,9,1,-3,7,-6,0,-2,9,8,0,8,-1,-7,2,8,-6,-4,-4,-8,5,-3,7,-6,0,-6,-9,4,3,3,4,6,8,-6,4,6,8,-8,-5,-3,3,2,-6,-7,-9,-8,-5,-3,-8,4,-7,1,-5,-2,2,-6,1,-4,-9,5,-6,-4,-2,4,-2,4,4,5,5,5,-9,2,-4,5,4,-8,-3,7,-7,-4,-7,9,6,4,4,-7,-4,-6,-5,8,3,-1,-6,8,-6,-8,6,-7,2,-6,-2,0,2,9,-8],[5,-6,1,7,0,-9,-4,-5,-1,4,4,7,5,-1,-5,1,2,9,6,3,0,4,-1,-3,-5,0,-9,5,-6,-2,9,-3,1,4,-7,0,-4,4,8,-2,-9,-1,7,6,-3,6,-3,0,1,0,1,2,4,-5,3,-2,7,-2,-7,1,-2,-3,-6,8,-6,5,-8,-7,-9,0,-6,4,9,7,3,3,-8,9,7,7,4,1,4,7,-8,6,-9,-7,0,-5,2,-5,-4,6,-7,-2,3,-4,8,2,8,9,-1,8,-3,-3,2,-1,0,-8,0,7,2,1,-2,4,-2,-5,-4,7,6,5,8,2,-3,-7,5,-4,-3,1,8,5,0,0,2,-5,8,7,1,-9,0,9,-4,-7,5,-4,6,-7,-2,5,-5,-8,2,-5,0,4,-9,7,4,-9,-5,3,2,3,-5,-6,3,9,5,-3,5,7,5,-1,5,-5,-1],[4,-4,-7,-8,-8,-4,6,4,-5,7,9,-7,7,3,-6,-7,8,5,-7,9,6,6,0,4,-2,7,-4,-5,-1,2,-1,-9,8,2,8,-2,7,7,-2,-8,2,-4,8,-1,-9,2,0,0,5,5,5,2,4,7,4,-1,-5,-5,-9,0,4,-5,2,-2,4,-7,-9,-4,1,-7,-6,9,4,-2,5,5,6,4,9,-9,-7,-9,8,8,0,-5,5,9,-6,0,5,0,1,1,-3,6,1,8,-6,-9,8,-1,-8,4,4,6,1,0,7,-1,-7,6,-1,3,9,-4,-3,-7,6,3,4,-5,1,-1,9,3,-5,1,5,-8,2,1,-6,8,8,-5,-9,-8,-6,-2,4,8,6,2,1,-5,8,2,-1,-4,8,5,0,4,3,-9,1,9,8,8,9,-5,4,9,5,1,5,-6,-3,-8,-7,5,6,4,-5,-3,4],[9,2,-9,5,4,-3,5,-4,-9,6,-1,5,-3,8,2,6,0,-1,-7,-7,8,-7,7,-3,5,-1,-2,-2,-7,2,9,-9,-4,8,4,7,4,-6,7,0,6,-2,-9,6,0,6,9,7,7,1,9,9,9,9,9,-5,-1,1,1,8,6,-4,-6,8,-2,5,-4,-8,7,6,4,-9,-7,3,3,1,1,3,3,-1,-2,7,-6,-6,-8,8,-4,5,-4,-6,3,4,-1,-2,-7,9,-3,-1,-5,-5,6,8,7,4,-1,-3,3,-6,-8,-4,-5,-4,-9,6,-4,-8,-9,4,-6,-5,-7,2,3,9,-8,-2,-1,-9,-7,6,4,9,5,2,9,-9,-9,-4,9,-7,-3,4,0,2,-6,5,-7,5,6,-9,7,1,2,-3,6,7,-1,3,-2,-6,8,-2,-2,9,7,-7,7,9,1,0,-8,-8,-6,-3,-3,-5,-4],[-9,-3,4,9,-6,-5,5,4,0,-8,6,0,7,0,0,-8,8,0,-9,3,-7,-5,5,-5,7,-6,-5,-4,-4,-9,7,-6,5,0,7,-6,-7,-4,4,7,6,-2,2,-3,8,-6,-4,-8,4,6,-7,6,-5,7,-8,1,-4,-6,8,-4,-6,-5,7,-6,-3,-8,5,-3
,-1,7,-7,4,-3,4,8,-6,2,-7,4,-8,-7,3,3,-4,-8,-5,-1,3,-2,-3,3,-9,7,-2,3,-1,-4,8,1,7,0,2,9,-6,-4,5,-8,-8,-5,-7,-4,5,1,-7,7,-5,9,-9,7,-1,7,6,0,-6,-7,8,2,1,-1,-5,-1,8,-2,4,-8,3,-5,8,8,-5,9,-8,-1,4,-1,-7,6,3,9,-9,6,8,1,9,-3,-7,7,9,-5,2,0,3,1,4,-9,2,-4,4,5,-7,4,-9,-7,2,3,-1,-1],[4,-1,-6,6,4,2,1,5,3,9,-9,-8,-2,-7,2,-4,-9,7,9,8,0,4,8,-8,-7,-8,-9,-1,-7,-3,5,-9,9,-4,7,-5,-7,3,2,6,7,4,2,2,-4,-5,-4,-2,9,-1,9,5,1,8,-4,-9,-5,2,-7,2,-8,6,7,6,-1,0,-7,4,9,7,-2,3,-3,8,3,0,8,8,-3,6,1,-7,-3,-8,-4,5,-7,-3,-7,9,3,-5,-6,4,-3,0,6,2,-6,8,-1,5,-3,-2,-3,-7,6,-4,6,-3,-5,3,5,1,4,-5,-6,5,2,-4,3,-4,-2,-8,-6,6,-3,3,2,7,-2,-8,-1,-6,6,2,5,4,-8,-1,2,-7,3,9,-2,3,9,9,8,8,4,8,-1,-4,-5,-9,-4,-4,-9,8,4,-1,-1,6,8,-4,-2,-3,2,-9,-7,-4,8,-4,-5,-5,-2],[0,0,-2,7,-4,3,9,-7,2,4,9,-8,-6,8,-9,9,4,-8,3,2,-2,-7,-8,9,-4,2,-8,-8,-8,-1,-3,4,9,-6,5,5,-2,-3,4,6,-7,-4,9,-8,2,3,9,-8,-6,-4,-7,5,5,-7,-1,7,-3,5,1,-5,-7,9,-7,-1,-7,7,2,-6,-2,0,-7,-6,1,-6,6,3,-8,4,-5,-6,-9,8,6,-9,-6,2,0,9,4,-1,-5,-6,3,-7,-6,8,7,-1,-7,-6,-9,8,9,1,-3,-8,6,-5,2,1,-2,4,1,-3,0,3,3,8,-2,-2,5,-3,-1,7,-9,0,-6,-5,-7,7,-2,-1,9,-1,-8,-1,1,1,8,9,2,-4,3,-7,-5,-8,3,-2,4,3,-1,-3,8,4,-6,-5,-5,-4,-3,-7,8,1,4,9,-1,-7,-2,-5,6,-7,2,-6,-2,9,2,-2,-6],[8,-1,-2,-1,0,-9,-4,5,-2,-8,8,-6,8,-1,6,-2,-1,6,6,5,-7,1,-5,-1,2,-5,4,8,-1,-3,-3,2,4,-9,0,3,2,-5,4,-2,-4,1,4,-9,6,3,3,-6,2,4,6,9,0,0,7,-7,2,-3,1,-9,-8,7,5,1,6,5,-4,-7,6,-6,5,-3,-9,7,3,-9,0,6,-3,4,-2,-9,4,-6,-6,-1,-9,-8,2,-5,4,-2,-4,9,-5,-1,-4,-8,-8,-8,5,-9,9,5,7,4,-3,4,-7,4,-1,4,0,1,7,5,-9,5,-6,8,9,2,1,-9,4,-1,8,-4,6,5,9,7,6,7,7,9,5,-5,2,-4,-8,2,1,-5,1,3,8,8,9,6,-2,-7,5,6,9,-2,-6,-4,0,-7,4,1,4,6,-6,-2,-9,-7,5,-4,9,-9,9,8,7,2,-1],[-5,7,-1,-4,-8,-6,-4,7,7,3,-3,7,6,1,2,5,0,-2,-1,-2,0,2,3,5,8,-2,9,7,3,-5,7,-2,-7,2,-4,-9,-7,-4,-6,-1,-7,-4,-2,5,-3,-2,-2,3,-3,-1,8,-5,8,5,-5,-4,-9,9,5,7,-2,-5,-3,4,6,3,-5,9,-6,-5,6,-5,-7,-6,-3,-9,5,6,8,4,-8,7,5,2,5,-8,-7,-7,-8,-1,8,-3,2,6,7,-5,-2,2,-7,9,1,-3,-6,-2,4,2,5,-8,-4,5,2,-7,7,0,3,-3,5,5,-4,7,-3,1,-5,4,0,2,8,6,-7,-5,8,4,3,4,6,1,-4,5,3,6,-2,1,6,-3,9,0,9,7,-4,1,7,-5,-6,0,-8,4,6,2,5,7,3,-8,-4,-1,-3,2,6,2,-2,7,8,0,-5,-4,-5,7,-4],[-2,-9,-6,1,-7,-9,7,8,6,-2,8,6,2,-9,-4,-1,0,-8,3,2,4,5,9,-6,6,6,-3,-9,9,9,-9,3,4,-3,9,-6,-9,9,-4,-9,5,9,-8,-2,1,4,-8,-3,-6,5,-8,-1,0,-7,8,-7,1,9,-3,4,6,9,4,-3,-2,2,2,-5,2,0,9,1,-4,6,-7,-4,3,1,-9,-3,-7,-4,8,8,0,0,5,-9,-7,7,6,-6,-1,-8,8,4,-2,0,2,-7,9,0,7,-9,-3,-1,-7,-3,-2,-9,-1,-6,8,-6,3,7,-2,0,8,0,-5,-2,-8,9,-7,2,-3,0,-4,-8,1,-1,8,9,6,-9,-9,0,4,-8,-8,3,-9,-4,6,-7,0,2,7,1,-9,-6,-8,-8,9,-5,-7,-4,-3,2,1,0,-3,4,-7,-2,0,5,5,-6,4,-9,4,8,-8,4,-4],[9,1,-3,2,4,6,-5,8,-4,3,-7,3,7,7,5,-2,-4,6,-9,8,-8,4,2,8,-9,6,-5,-7,6,-2,5,-2,2,-9,-6,0,2,3,-7,2,2,-1,-7,2,-1,-2,5,-1,5,-9,1,-8,8,5,-5,-2,0,1,-7,0,9,-6,2,3,-2,7,-7,-3,9,-9,6,2,1,-3,-1,-9,-4,4,2,8,-7,1,1,7,-5,-3,0,-9,6,-4,0,7,-8,-1,6,3,5,-2,8,-2,2,-4,-9,-1,-4,-2,9,8,0,6,4,4,9,-6,1,1,3,3,-2,5,-9,-6,-4,-5,-4,-6,-7,-6,-4,-2,-9,1,6,-9,2,8,0,-7,-9,6,-1,2,-3,7,-4,-7,1,-9,6,2,0,5,8,3,8,-5,5,-6,-7,-5,-5,-3,5,2,8,2,3,-8,3,-1,7,-6,7,7,-9,8,8],[2,8,8,8,-2,6,-4,9,-7,0,9,-1,-9,2,5,4,9,-3,8,-6,2,1,-2,-6,0,-8,-8,1,-5,-8,-1,0,1,-5,-1,4,0,-9,4,-1,4,-3,-4,-3,-4,3,-9,2,-3,6,-7,-8,2,3,-2,8,2,-9,5,3,0,-3,-6,1,3,-3,4,1,-4,-5,-1,6,-3,5,-2,-2,-6,-1,-4,-1,-7,-6,-6,8,-3,6,1,6,-7,-9,-9,-5,3,-4,-5,-9,-5,0,2,7,-2,-4,7,-8,0,2,-8,7,1,9,0,-6,-2,8,-5,-6,-8,-5,-3,3,-4,4,-3,6,3,-5,5,-2,-5,-4,4,-1,0,8,2,0,-8,-7,9,-4,2,-2,-5,-5,-8,8,9,-2,-6,7,8,0,-7,-9,0,-3,-7,0,8,-3,2,2,-8,1,-1,-6,1,-4,-8,5,-7,-7,-3,6,1,6,6],[-7,9,9,-6,3,9,3,-7,4,-6,3,-8,-1,-1,6,1,4,-8,-2,8,9,6,6,9,6,-6,8,3,-5,-9,-7,-8,6,-7,-8,9,-4,-7,-8,6,-8,-5,0,-4,3,-4,-3,6,2,-1,2,-5,-9,1,-5,8,2,3,-8,8,9,1,-4,-5,0,7,8,7,9,2,-2,9,6,4,-4,-1,8,5,
8,-2,-4,0,-6,9,-1,8,7,-6,0,-2,-7,0,-8,-4,-6,5,7,1,0,-4,-2,7,-7,-4,-3,0,5,-5,4,8,-2,6,-6,9,6,2,3,8,8,4,6,-4,0,7,8,-6,1,-6,5,-8,7,8,-6,2,-5,-1,4,-7,-1,3,0,-9,-9,1,2,4,-6,3,0,-1,5,7,-8,-7,-5,0,2,-5,2,-8,-4,-1,-9,4,3,-7,9,2,-3,-9,-8,-4,3,4,-8,-9,6],[-6,-6,-2,5,-3,-9,-7,-4,0,5,-4,4,-3,1,7,-7,-3,7,-3,1,3,-9,-6,-5,-2,1,1,-8,4,1,-7,0,-5,7,3,2,6,-7,9,7,2,0,5,4,0,9,-1,-6,-1,4,8,-4,0,1,-5,-1,7,2,2,-6,-7,-3,-1,-7,-2,-7,-4,2,8,8,3,-6,4,4,0,9,7,-6,7,-6,6,9,-4,-7,3,8,-3,3,-6,1,-2,-2,-7,-1,-8,-7,0,-6,-4,-5,-1,-6,-9,5,4,8,-7,-6,-9,8,-5,9,-6,-7,-3,1,-3,2,-6,-1,3,-2,-9,6,-6,-8,7,8,1,1,-7,-4,-9,6,7,-6,-6,-2,-3,5,-2,8,4,-3,-2,3,2,4,3,4,-7,-7,-3,6,5,4,5,1,2,-1,1,-8,2,4,-2,5,3,-4,8,5,-1,5,-2,9,-5,9,-3],[8,7,-7,6,9,3,-7,0,6,7,-9,1,-5,-4,8,1,-2,-4,0,4,-2,-4,-7,7,5,6,-4,4,3,6,-3,-8,-3,6,-1,9,-8,4,6,-1,6,0,1,4,8,-7,4,2,-7,9,5,0,-5,4,2,-9,-8,-9,8,2,-3,9,-9,5,-6,0,-6,-2,8,8,8,-7,4,-9,-2,9,0,-9,3,-9,-8,9,-7,-7,-7,2,6,-7,0,-8,9,3,2,8,6,5,6,5,3,-9,-7,-3,-9,-4,7,9,-9,6,4,2,2,-4,7,6,-4,4,-5,0,7,-2,-6,-5,8,9,9,-1,4,0,7,0,2,-7,-8,6,-9,1,7,-9,4,-1,-3,-4,-8,-5,9,-9,-5,-1,-1,-7,8,-3,-3,-2,6,-8,-6,4,6,-3,-8,-6,4,0,6,1,-7,-2,0,3,-8,1,9,5,2,-4,6],[6,-1,-6,6,7,7,-7,7,5,-1,-7,7,8,9,-5,-3,8,1,-9,6,7,8,9,0,-4,3,-7,-7,5,-3,5,-5,-7,-6,4,-3,3,6,9,-3,-8,5,1,7,7,8,9,3,8,-1,6,3,-6,-7,-1,3,9,2,9,9,5,3,-5,-2,-7,1,-1,-4,-8,-4,-4,8,-2,-1,3,5,-2,2,1,-7,5,-8,-6,2,-7,-6,-8,7,3,-9,-7,3,9,9,-2,4,-2,-4,3,-4,-2,8,1,1,-2,9,-3,6,7,-8,-6,9,-5,-4,-2,-2,5,-1,-2,-3,5,-1,-2,2,9,7,1,-5,2,-3,-6,-1,6,-8,3,2,2,-8,3,-3,0,3,-4,9,-3,-3,7,7,0,-1,-5,9,8,3,-3,3,-4,-2,-7,5,4,9,-7,3,-1,0,-2,5,-1,9,3,7,1,9,-8,8,3],[-4,-6,4,-3,9,-2,0,-5,4,7,0,-6,-4,4,7,4,-2,1,-4,6,-3,9,8,-4,2,3,0,-5,-1,-4,7,4,-5,-9,-5,0,2,8,-9,5,-7,4,0,5,0,-8,5,0,-8,-7,-5,-8,-8,4,9,1,1,-5,8,2,-7,5,-8,-6,-6,-2,-3,-1,-3,-9,6,0,-6,-3,-7,-2,7,9,-1,3,-9,9,7,-2,6,7,9,-2,-2,-5,-9,4,0,7,4,3,-2,0,8,1,4,-3,-9,-2,5,7,0,6,1,3,6,7,8,-4,-5,-4,-7,4,9,0,4,1,2,4,0,5,8,-5,-3,6,-4,7,-8,-2,-7,-2,-4,8,-5,-1,2,-8,-2,8,6,-4,1,9,-6,-9,2,-1,-8,6,-2,-4,6,3,-7,-8,0,-8,-9,6,-8,-1,-7,-8,-4,8,-5,8,-2,7,5,-9,4],[-4,6,-9,6,-4,1,-5,0,8,-4,-6,8,5,2,-8,1,8,-5,-4,3,-3,1,7,3,8,-6,-3,-2,1,5,-3,4,6,-8,-7,-4,-7,9,1,-8,7,9,-4,-6,6,2,5,-4,-9,8,8,-4,-7,9,5,-6,4,2,0,5,-7,3,-4,-8,4,0,-5,8,8,-6,-9,7,8,6,-3,0,6,-3,-1,-7,-4,9,-1,8,-2,-2,4,-3,1,-6,-4,-8,-6,0,-2,0,-2,3,-2,5,-5,7,-2,9,-7,-4,-4,-2,-2,-2,-3,4,-8,-7,-6,2,0,5,1,-1,8,2,-7,-7,4,8,-1,2,4,5,1,-7,-6,5,-5,-8,-6,9,5,-8,-6,2,1,-8,-4,-7,-9,9,-2,0,-2,-7,-7,2,4,-4,2,9,-4,1,-8,-3,7,7,2,-1,-9,8,5,-7,9,-7,-6,-4,9,8,8],[0,-5,-5,5,5,-7,3,-8,-7,9,-8,2,7,-4,-5,2,-9,7,-9,9,-4,3,-2,-3,4,5,6,-6,5,0,-2,-6,-7,6,9,-1,-7,-6,7,9,-8,-7,-3,6,3,4,8,3,9,0,3,0,7,7,3,1,-5,2,2,3,-9,-5,-7,0,-9,8,2,-8,9,-1,-6,4,-9,9,-9,6,-2,8,8,9,-4,5,-3,4,-7,3,9,-3,7,-9,-8,5,3,-7,2,9,3,1,7,4,0,-6,-5,-3,-8,-7,5,-8,-4,-4,7,-3,-4,-9,1,1,-4,0,0,-8,2,-6,6,8,8,5,5,-7,4,5,-4,7,-2,-1,-5,2,-3,0,4,-3,4,9,7,5,4,-2,-1,-2,7,1,-7,0,8,-9,2,-7,4,1,9,3,-3,1,-1,-3,4,-3,-4,7,9,4,-6,5,7,7,-9,-3,9],[-1,1,-4,3,-7,-9,7,-5,2,6,6,-5,-7,7,9,8,-7,3,-1,0,-5,-4,5,0,5,-7,2,-5,-8,-3,0,-6,-3,-6,1,2,-3,3,-3,-6,3,3,-6,-7,2,-8,5,-3,8,-9,-8,2,0,-5,-3,-6,-5,-6,0,-5,9,-9,6,-5,4,4,5,9,-2,-8,3,-9,-6,8,0,8,4,8,8,-9,8,1,-5,1,-5,-4,1,-9,7,5,-8,3,0,-3,3,-2,-9,-9,5,-5,-5,-6,-6,5,-3,9,2,9,0,5,-2,-5,-2,-7,-3,-1,-9,-4,2,-7,-1,-9,2,1,-4,-9,-4,-9,2,-8,6,3,1,-2,2,-4,-3,4,1,-7,0,9,6,0,-2,-1,-8,4,6,7,-3,6,-5,0,-6,8,-2,-5,-1,3,-1,2,-2,2,6,-4,-5,3,0,-2,0,1,-1,-4,-8,-7,-9],[-9,0,-4,-6,-7,4,9,7,1,5,2,-8,4,-9,-6,-9,-5,4,-9,5,8,7,-8,-8,-5,9,0,4,3,2,3,6,-1,7,6,-9,6,9,4,-4,-8,-1,-8,6,2,6,3,-8,-7,5,6,0,9,-5,-6,-6,-2,-6,-5,-4,-8,-8,-8,4,7,1,-4,-8,8,3,-4,0,-5,-9,-8,6,3,4,1,-5,3,-8,-2,6,8,9,8,8,-4,-3,4,5,0,
-9,6,-8,-5,5,5,8,2,-9,-6,7,-8,4,-5,4,7,-4,-5,-6,3,4,4,9,-1,3,2,8,-1,1,2,4,-5,-3,-4,-3,9,4,-1,-1,8,-1,5,0,8,-9,6,7,-8,6,5,-6,-1,-1,3,-7,8,-7,-5,0,-7,-3,-5,8,-7,-5,-7,5,9,0,-2,3,-6,3,-1,7,-6,9,-8,0,-8,-6,-2,-3,0],[-1,2,0,-7,-1,-9,4,5,7,-3,-2,9,9,-1,3,9,-9,3,5,3,8,-6,-2,3,2,6,-8,0,3,-9,-4,4,3,-3,6,-9,1,0,-5,-5,-2,9,-4,6,4,8,-7,0,2,-3,-8,-6,8,-2,-6,0,6,7,2,-4,4,0,-9,8,-4,-8,3,0,6,7,-3,7,-2,-7,0,5,0,-7,-9,9,-5,-2,9,2,-4,-4,-7,-6,9,5,8,0,-7,-7,7,1,4,6,2,-6,6,8,4,3,-5,3,5,6,-1,-1,-6,2,5,8,6,-7,8,-5,2,-3,-1,4,-7,1,-5,8,-4,4,5,-8,-1,-7,-2,8,1,2,-9,-5,-3,3,-6,-5,-7,-4,-3,-3,8,-6,3,8,-2,-4,9,-9,4,-1,-4,4,-4,5,5,5,-2,6,8,8,8,2,-7,0,6,-8,9,-2,-6,-7,-2],[0,-5,-1,1,7,6,-3,-1,1,7,1,-9,-8,9,7,6,-7,-2,9,5,2,-4,3,8,0,8,8,-2,1,3,-8,-9,3,-8,1,-5,-6,7,9,8,-4,-5,3,-7,-7,2,3,1,8,3,-5,-5,-6,2,-2,4,-2,-3,3,3,0,1,-7,6,-5,-8,1,-3,6,-5,-4,9,2,-8,6,4,-9,8,7,-1,-1,-6,7,-2,-1,7,-7,-2,-6,9,6,0,-4,3,2,7,9,3,-2,0,5,-7,3,-8,2,1,5,7,1,-5,-1,8,4,5,-5,-9,9,-2,3,8,3,4,-9,7,-7,-3,5,1,-3,-6,4,6,1,6,7,8,-1,-8,2,6,7,7,5,6,-7,4,4,-1,4,-8,-6,-7,3,9,4,-1,-3,-3,2,-3,-5,4,-4,9,2,6,0,-2,2,5,7,1,8,0,-8,6,-9],[2,9,3,-5,-3,-4,4,0,4,8,-5,-2,-7,-1,-3,-8,-7,2,-4,-8,-7,2,-7,5,-6,1,3,-2,2,-9,-2,2,-1,2,4,-8,-8,-7,-5,0,6,-6,6,-9,6,-8,-3,5,-6,-6,2,2,7,-1,-7,6,0,-2,-6,3,-4,-1,5,1,-4,-4,-8,-2,-3,1,-6,3,3,-4,8,-2,2,4,-6,-8,-9,-2,-8,-1,-9,4,2,-8,9,6,-4,-2,3,6,-8,-1,-3,-4,8,-7,-8,-3,7,-4,8,-2,7,3,-4,-2,-3,0,-9,-2,-8,-9,-4,3,-2,-8,-7,-5,-6,9,7,9,-6,6,-2,7,7,-7,8,-6,6,1,-6,9,-5,-2,2,-7,-2,3,8,7,-6,-3,-2,8,-4,2,-3,7,-7,-1,4,9,7,9,0,-8,6,7,-1,3,-9,8,-5,8,-9,8,-3,-4,6,0,-1],[2,3,-9,-8,-6,-5,6,-7,-2,-2,6,6,7,-4,2,0,6,6,8,-9,2,5,1,-7,4,-9,-2,-7,6,6,1,3,0,6,-1,-3,-5,3,8,9,-6,-3,-5,-2,-1,3,-6,6,8,4,-7,-1,4,-3,6,4,-5,-3,0,3,-5,-6,5,4,-8,1,4,-1,-8,3,3,-8,-2,-9,-3,-8,9,5,-7,1,2,-3,6,-7,-1,-5,2,-4,-3,-3,-5,-4,-7,9,5,6,4,9,-8,8,-8,-3,-7,-6,-4,4,-8,-4,3,-4,-1,-4,-2,-6,0,-8,-6,-3,6,3,6,-3,2,-1,-3,-8,8,5,1,-3,-8,-7,-2,8,3,-9,-8,-4,-7,-7,-3,4,-7,6,-1,3,-5,3,-8,8,-8,-5,4,-5,-9,-8,3,2,-3,3,-7,0,-1,0,-2,-1,-1,9,7,5,5,6,6,2,-6,4,5],[-5,0,-6,-4,2,-3,2,3,-7,2,3,0,6,9,-2,1,-4,0,1,5,8,-3,-2,6,-8,2,9,-7,2,5,-5,-6,-9,5,9,-2,2,-1,0,6,-9,-6,6,9,8,-6,-6,9,0,-7,0,9,-8,-3,1,-4,-2,-8,-6,3,-2,6,-5,3,7,3,5,-4,-2,-1,-4,2,-4,4,4,3,4,6,-3,4,0,5,2,8,1,3,-4,4,9,-9,6,8,7,-9,-3,-5,9,-2,-1,8,-3,-9,8,4,1,5,6,-4,-1,-3,-3,-3,-9,-9,7,9,-4,4,-1,-9,-2,-7,-8,-9,7,-5,-2,6,6,-4,-7,4,5,7,5,-2,2,-9,-9,-8,8,-3,4,5,-7,7,-2,-1,-5,3,1,-3,4,4,-8,0,9,-8,1,3,-5,-4,1,-4,-8,7,-5,-3,2,-2,-8,6,-6,-5,-4,3,7],[2,-3,7,6,-6,-2,-1,9,-6,0,6,4,7,2,1,-5,-3,2,5,-3,7,8,-1,-2,-7,9,-7,-3,-6,2,-9,9,-2,3,5,-3,1,-6,4,-2,6,-4,-4,9,-1,-2,-3,-1,7,1,-3,6,3,-6,3,6,-1,7,-6,8,-9,3,-4,7,4,-9,3,-3,2,7,-4,9,5,1,7,-1,8,8,-5,8,1,3,9,5,2,8,-9,-4,-6,7,-7,-7,-6,2,-9,-1,-7,-1,7,-6,0,-7,1,-1,-5,-6,5,1,9,7,-2,-9,-6,7,-6,-2,-2,9,-8,3,7,-5,7,0,3,5,0,5,-2,-3,5,0,1,-1,6,9,5,-2,1,-4,7,1,-7,-7,7,1,3,2,-8,8,0,1,9,-8,5,7,6,-3,-9,-6,-3,-7,7,3,-2,0,-2,9,-3,5,5,-7,-7,9,0,-9,6],[1,-6,3,1,-3,-2,-3,-6,3,-2,-5,6,-9,8,-5,-5,-7,-9,4,-8,-3,-4,3,7,7,3,-1,0,-3,-3,-6,5,-7,-5,-4,5,-8,7,6,9,3,6,-8,1,-7,-5,-1,-4,5,4,-6,-9,4,2,9,2,6,6,-7,1,-2,-6,-9,8,3,1,6,4,-4,2,6,5,-4,1,-6,6,2,-9,3,-3,-6,8,2,-6,7,2,-5,-3,8,-3,4,-2,9,4,9,3,5,6,6,-2,-9,2,-1,-9,-1,-4,7,9,-7,2,-5,5,8,-1,1,-7,9,-4,3,5,-9,-7,-3,-9,4,3,-4,-9,8,5,8,-6,0,-9,8,-3,-3,-2,9,5,5,4,-9,5,2,-3,-1,3,-5,-4,8,8,-9,1,-9,7,8,-2,1,-2,1,-8,-9,-6,-9,-1,7,2,7,7,8,2,1,-5,7,4,4],[9,-3,-2,-4,6,2,1,7,7,-4,-3,7,-3,-1,-5,1,4,-9,7,6,9,-3,8,-9,-7,-6,-3,5,-3,5,-3,1,-1,3,-5,4,4,-4,8,3,-1,-4,-4,9,8,7,4,-9,-5,9,9,-1,0,-3,-6,7,-3,8,7,-5,5,-2,2,1,1,-6,4,-6,0,-4,-3,-1,7,-1,1,9,-5,-3,-3,5,0,-8,-8,5,3,8,-6,2,0,8,5,9,9,6,8,4,6,3,8,9,-2,9,-9,-2,0,3,3,3
,4,6,1,9,-3,0,3,0,-1,2,-9,-4,3,-5,-5,-9,5,9,8,-1,-2,-6,9,6,3,-4,3,3,-3,-9,-4,-9,-9,-6,4,-7,-1,-3,3,-9,-8,-3,-5,3,0,1,3,-9,-5,-5,-8,5,-9,9,7,8,2,-7,8,-4,-4,3,-9,-9,9,0,-4,8,1],[-9,-7,7,9,9,7,9,0,-7,-9,-7,0,3,4,0,2,4,-9,9,-3,2,-7,-8,0,7,6,-8,-4,-4,5,3,0,1,8,9,-3,-2,0,-5,8,-3,-9,5,0,0,8,-8,1,-2,-4,-5,-3,-4,2,0,-7,3,-5,4,5,0,-7,0,0,-4,9,-6,-9,-3,-3,7,9,2,2,-1,-6,-2,-3,-7,2,-9,6,0,5,1,6,-7,-5,5,8,6,-9,7,4,-1,2,0,2,-6,7,1,-4,-2,-3,-2,8,-8,0,4,-1,2,5,9,0,9,7,-3,-3,6,-5,-1,7,5,-4,-3,2,7,0,6,7,9,9,9,-4,1,-8,9,-5,-1,1,0,2,2,-7,-5,-2,-1,-1,9,5,1,3,6,7,0,5,8,-8,-5,9,-8,7,2,5,-4,5,-9,9,9,1,2,1,9,-3,-6,-8,-8],[1,-3,-2,-3,-8,8,-9,-9,1,4,4,-9,-2,-7,-3,-3,5,-8,-9,2,-2,-3,7,7,8,9,9,-6,-8,2,-9,-6,7,-6,-1,6,4,7,3,6,-2,-3,8,-3,-7,-6,-2,-1,-4,-3,-5,4,2,-9,6,1,-1,6,6,7,9,7,-9,-8,-5,-5,9,-3,8,8,7,-9,2,-3,9,6,2,9,4,8,5,-3,6,-8,7,8,-4,5,3,-3,6,4,-7,4,6,-4,-3,4,-4,2,-7,-1,1,7,9,-2,0,4,3,-5,0,5,-9,0,-6,-2,-6,3,-3,-9,3,-7,2,3,9,0,0,-9,-3,-9,-4,4,-8,8,8,6,9,7,-2,7,-4,9,-3,-1,3,-7,-7,3,-4,-8,-6,-2,8,0,1,-4,7,-1,-8,-1,5,8,7,-8,-8,-4,7,8,-6,-5,-2,1,-9,-5,-8,-2,6],[7,8,7,8,2,-9,-5,-6,-3,-4,-2,9,8,5,3,-8,3,7,-1,6,-9,6,9,5,-8,9,6,6,-9,6,-3,-4,-8,5,-3,9,1,7,5,-6,-6,-2,3,4,-3,-9,-6,8,6,9,5,2,-9,-1,-1,-3,3,3,3,5,3,9,7,-9,-1,-4,0,-6,-9,6,-6,6,6,8,-2,8,-7,9,-9,5,0,-7,1,9,-6,0,-9,-8,-4,-9,6,-4,5,7,-6,9,-8,7,7,-5,-9,-6,-2,-8,-5,-3,-7,7,-5,0,-8,-3,0,-8,1,-5,-1,8,0,-4,8,-7,2,1,9,1,4,2,-3,5,3,-9,-9,4,8,-5,-5,-5,0,-9,0,-1,-7,-6,8,6,6,0,1,-7,2,5,0,-1,-9,5,-5,-9,2,-1,5,-1,8,-7,-4,-6,2,5,-8,7,6,-2,2,-9,4,-8,9],[5,6,-2,-7,6,-3,5,-8,-7,-1,-6,-5,6,1,-7,8,-9,7,-5,5,-6,6,1,-4,-6,9,7,-1,6,2,-9,5,8,-7,0,-8,3,4,-6,8,6,9,-5,-9,-1,5,-5,-2,7,-6,-3,7,-8,6,-6,6,6,-7,8,2,9,8,-2,-5,6,-1,-3,8,-1,4,7,5,-2,2,-5,-7,-3,9,8,-7,1,5,2,0,7,0,-6,-3,-8,4,1,-2,5,-5,0,8,1,7,8,-3,8,-9,-8,-4,5,2,5,7,5,8,9,5,1,-5,9,8,5,-9,5,-6,-5,-2,5,5,-2,2,-5,-5,8,-9,8,-3,-5,-9,8,9,-9,4,-1,5,-1,4,-9,-6,9,-9,7,-1,0,3,-7,2,-9,-8,9,2,3,-9,0,8,4,5,-4,-8,-3,-1,-7,4,5,7,-2,1,-2,7,-2,-4,7],[-4,4,-8,-4,-2,9,2,4,6,9,1,8,-5,1,-1,4,9,7,-1,1,0,5,5,-6,5,9,-3,6,-7,-1,-7,8,7,1,1,6,7,-8,2,-3,3,-2,7,0,-9,-3,-8,5,-2,-8,1,-5,4,2,-1,-4,-9,9,9,2,6,2,0,-1,-7,4,-3,0,-7,-8,-7,-4,2,-6,-2,-3,-9,-8,-4,4,9,-2,-4,-4,8,0,7,-8,2,-5,-4,2,2,8,-1,-1,0,9,-1,1,-2,2,7,-1,-4,6,-9,-2,4,8,1,9,-7,3,-1,9,3,-8,1,-3,3,4,1,4,4,1,-2,9,5,5,-4,4,3,6,0,-8,0,-4,-3,-8,-4,2,4,-3,4,7,9,4,3,-5,9,-5,-7,2,5,9,-9,6,-8,5,5,-3,-9,-9,6,-2,-5,1,-9,-8,1,-8,-2,4,8,3,3],[9,7,-5,-3,2,-7,9,7,4,8,-7,-7,-3,-8,-7,-1,-6,-9,4,2,-3,-8,2,-5,0,5,-6,6,-2,0,-4,-4,-8,7,-4,1,8,9,-9,-1,3,5,4,5,-4,8,4,0,-4,9,7,-6,-4,8,-1,4,2,-1,9,-2,7,3,-2,-3,-5,-5,8,7,-1,5,-5,-4,-2,3,-6,-2,7,5,-8,-5,8,4,3,-8,-1,-4,7,8,7,8,5,-7,4,3,-7,9,6,4,-2,-4,-9,-6,-6,-8,3,2,1,-5,-5,9,2,7,-5,-1,-8,-1,6,4,-5,-7,8,5,9,-9,1,-7,8,-6,-9,-9,-6,-4,7,-4,5,-3,-1,0,9,-8,-8,-7,-7,-5,8,5,-6,-2,4,-9,6,0,9,0,-4,7,6,6,-3,-8,2,-4,-6,9,-4,-6,-8,1,-3,6,8,-8,3,7,-4,8,2],[7,1,-2,4,0,-9,9,-5,-1,-4,-4,-1,-3,1,7,2,9,-9,2,-8,4,-4,-5,-8,-7,0,4,0,-5,8,7,3,-6,-2,6,-2,3,-4,0,-4,3,-7,-4,4,6,0,2,0,-2,-2,1,1,7,-9,-9,8,9,3,-8,-9,9,1,-1,9,0,-7,9,-2,9,0,-2,5,6,0,-9,3,-9,8,1,-9,1,4,-7,-6,9,8,-1,3,-1,1,-6,2,4,7,-1,3,-1,-5,-7,-4,-7,-5,-7,-4,8,9,-9,5,-4,-3,9,5,-7,2,7,-1,-8,-7,-7,0,5,0,4,2,-2,7,5,8,-2,-8,-2,3,1,4,-3,-7,4,5,1,3,-7,-4,2,-8,-8,-3,2,-3,0,-8,-7,-3,2,4,7,1,2,-5,-2,-2,9,4,5,1,-3,-5,2,1,-6,9,7,-6,2,3,-1,-2,-9],[0,8,-1,-7,-5,-2,-5,7,-9,1,2,-4,-4,8,0,3,1,3,7,8,-3,-2,-7,-7,-7,-1,1,5,2,-6,3,-2,8,4,8,5,3,-8,0,9,1,8,0,2,-2,2,6,-5,1,-9,7,7,-1,1,6,5,4,-6,9,6,9,4,9,-8,5,-9,-4,-5,-3,-7,-8,-6,6,2,-2,-2,-2,-7,-8,7,6,7,-8,0,1,-7,-4,3,0,2,-4,-6,-5,6,3,-6,-4,-1,2,7,2,9,-4,6,-9,3,-3,5,3,-7,2,6,5,-1,-7,1,8,-5,3,3,-5,-9,-8,-3,0,8,-7,3,-9,8
,6,-2,-2,7,6,-2,-7,1,-1,8,-6,-7,-6,-6,1,-2,-4,2,1,8,0,-8,-1,-7,7,-5,5,2,7,5,1,-9,1,7,4,-4,-3,9,-7,0,-5,9,-8,3,2,3,3],[-2,8,-4,-9,0,5,2,1,3,9,2,7,6,-4,7,6,6,4,-9,3,3,2,-7,1,-8,2,2,1,-5,-4,-5,-3,8,-6,4,-4,2,8,-2,9,7,-4,5,-8,5,-5,-3,-5,-5,-6,4,-5,-2,-8,3,-5,-2,3,3,-8,-9,-9,-9,5,0,-3,-7,8,9,9,-7,7,-2,-8,1,8,-8,9,6,-2,-8,-4,-6,-6,-7,-6,3,8,1,-6,5,4,5,-1,1,9,-5,-2,-4,-9,4,-4,8,6,-5,0,-3,-9,-2,3,-2,-7,3,2,0,-7,3,-3,0,-6,-6,1,6,-8,1,-2,6,0,8,-4,-8,4,4,-2,3,5,-3,3,-2,5,-2,-4,-5,2,9,3,0,8,5,-4,-1,-6,-4,0,8,-4,8,4,8,-8,-6,7,1,8,5,-4,9,-1,-3,9,5,-2,-9,-9,8,5,-9],[-3,7,-1,-5,-6,-5,-7,8,1,3,3,-8,9,9,3,0,-3,3,2,-8,-6,3,7,5,6,6,4,1,6,8,2,9,9,2,2,-1,-9,1,4,-4,9,7,-3,2,7,9,-8,-6,-7,-7,4,2,7,6,3,6,-2,-9,1,0,9,3,1,-9,-3,-2,-2,0,0,-7,9,-8,-6,9,-2,0,-5,-4,2,-5,-8,-8,-1,9,-6,-9,3,-7,6,8,-5,9,-5,2,-3,-1,-8,9,-9,-5,9,1,-3,6,-8,-7,-3,-5,-8,9,-6,-6,-8,5,-8,7,-9,-6,-4,4,-4,-5,-5,7,-1,3,-3,3,-4,-2,0,-1,-4,-1,8,-1,-3,-8,-7,-9,-5,1,-6,-1,-5,6,-2,-9,1,-8,-9,4,-6,5,0,5,6,-5,-1,-8,2,-4,-1,5,2,2,8,-6,-9,-7,3,-8,-3,4,6,-6,5],[-1,6,5,1,-7,-3,-1,2,8,1,-4,-1,7,-5,0,3,9,5,-7,-5,-7,-6,6,-6,1,0,-5,-8,-3,-5,6,5,-7,9,7,-7,-4,-8,3,-9,-4,-2,-7,-9,-3,4,-5,5,4,-9,-6,0,5,9,2,8,5,-4,7,2,-9,-6,4,-5,-2,7,-2,-7,2,5,4,7,-3,8,8,-6,9,3,5,-9,-9,8,1,-1,-4,-8,-2,-2,4,-3,6,0,-8,-9,-7,9,1,4,-3,-5,-2,-5,2,7,-1,4,-4,-2,4,6,-6,-4,1,9,-1,-2,-6,-4,-9,-7,-7,-2,-8,2,-5,8,-8,8,-6,9,4,9,-6,-4,9,1,2,8,6,4,-4,9,-5,6,-1,3,-5,-5,-5,4,-7,7,-3,-1,-8,2,-8,-6,-7,-1,-9,-4,7,8,-3,0,0,8,5,2,-3,-5,7,-1,-2,2,4],[-4,2,-2,-5,-2,3,7,8,7,-5,-3,-8,-9,-4,0,8,2,6,3,8,9,8,-4,-3,-4,5,-2,-8,2,-6,-5,-4,2,1,-9,1,-4,-3,8,9,2,-4,-4,1,0,-5,-4,-2,2,3,8,2,8,-6,9,-3,6,5,5,-7,-6,-3,5,-7,-7,-1,1,5,-8,-5,8,-2,6,-9,2,-8,-8,1,3,-5,-8,-4,-1,-3,1,6,-3,5,0,-4,8,-1,-9,2,4,6,4,-9,1,-8,-3,0,5,-8,8,-2,5,9,1,-5,6,-9,3,-6,-2,8,-4,9,2,3,-7,4,-3,1,2,5,-3,7,0,6,4,-9,6,-3,6,8,5,-7,4,-2,4,5,-2,2,9,6,-9,-4,-9,-4,-8,9,-5,5,-3,7,1,-9,7,-2,1,9,2,-3,8,-7,-1,-4,7,-5,-7,-6,-4,-7,1,-6,4],[-1,8,-5,-2,-6,1,5,-1,3,0,7,-5,2,-7,-4,1,9,3,8,7,4,-7,-4,8,-3,-2,8,2,8,-7,-4,5,6,-6,6,-5,1,1,-2,4,-5,-6,-4,-5,-1,-2,0,-8,8,9,0,5,-2,-1,1,-5,-7,0,1,-2,9,2,-9,2,-5,-6,-4,9,8,4,-5,-3,2,8,-4,-2,1,-7,-8,1,2,-6,-9,-3,-7,-8,2,9,-7,-1,7,8,7,3,4,-5,-5,-3,-6,1,1,-9,-3,2,-2,9,3,-4,4,4,9,-3,-7,-2,8,-7,-4,-1,3,2,0,-8,-8,-8,9,-1,-1,5,0,5,4,-8,7,8,-8,6,2,3,-7,6,-6,-8,6,0,6,5,-1,-1,-2,4,-7,-9,9,8,-2,-7,0,-5,5,-3,8,-2,3,-1,-2,2,5,-4,-5,-5,7,8,4,-1,8,3,5],[-9,5,8,4,7,-5,8,7,-6,7,-7,4,-7,-5,-1,0,0,-2,-9,-7,-7,7,-1,9,9,8,-9,-8,-9,7,-9,3,-8,9,9,5,-1,8,-2,0,5,-6,0,0,3,8,4,-5,2,6,1,-7,9,2,-3,-1,-3,7,9,-6,-5,4,7,-1,0,5,1,-2,6,0,8,8,0,-7,7,-2,1,-8,7,-8,-9,7,-1,3,-1,-9,0,-7,7,3,-6,4,7,-5,-3,8,5,6,4,-1,8,-6,2,7,-4,1,3,4,-3,5,-5,-1,-8,7,-9,1,-8,0,2,-6,3,8,-2,0,-1,-4,-4,-9,-3,5,-1,-1,6,7,-3,7,-8,7,-9,-1,0,3,5,-4,0,5,3,-5,-4,4,0,9,0,4,2,-7,5,0,-1,0,0,2,0,-2,-2,-6,5,-5,4,5,-3,9,3,7,-2,-4,2],[6,-1,-4,6,-8,4,1,-7,6,1,6,5,7,9,-6,3,3,1,-5,1,-5,1,-2,5,0,-8,9,-8,4,7,-5,0,9,5,-8,1,-1,6,4,-5,-1,-7,4,5,-5,6,-4,-7,2,6,-7,-1,7,9,-9,-7,-8,-2,2,6,1,-7,-5,9,-8,9,-9,4,-9,6,-5,9,-8,-9,-1,-4,5,8,1,-6,4,5,-9,-9,0,0,4,-3,-6,0,2,-1,3,1,8,2,-5,8,1,9,-2,1,-2,7,9,5,-5,-9,4,-8,-1,-5,9,5,4,2,-6,0,9,-1,9,-9,5,6,-6,4,9,8,2,-7,8,7,7,-3,7,-5,-7,9,0,9,4,-4,8,5,0,7,9,-4,3,1,-3,8,6,2,-3,9,-8,4,-2,-4,9,-2,-6,-5,6,-7,4,-7,-1,-2,-3,1,-8,5,5,-4,-3],[4,9,2,-4,-6,7,-6,3,-9,-4,-4,-1,4,-6,7,-7,8,-6,6,-6,-4,1,3,-8,-1,8,-5,-9,-4,3,5,-4,-9,-3,-8,-1,-1,1,4,7,-8,7,4,3,7,-6,5,4,4,4,8,-2,3,6,-6,6,7,-8,9,1,9,-4,-2,-4,2,-3,-6,-1,8,-2,9,6,-1,-8,1,-4,-3,4,-4,7,-3,0,2,1,1,5,-3,-9,-8,9,7,-7,1,-6,-3,-8,-8,8,7,6,-3,-1,7,8,-7,7,-9,-9,6,-2,-5,-6,-7,2,-2,5,-5,-8,4,-5,-5,-5,6,-3,3,-5,4,-8,-6,5,-6,-5,0,9,-6,-4,2,3,8,-9,-1
,7,4,-5,-5,-2,6,9,-2,-2,0,2,-4,7,6,9,-8,7,9,-1,1,7,2,7,-2,9,0,-8,8,0,-9,-8,5,4,-5,-2,-6]]],[13,14,169,155],[72,33,109,156],[9,17,172,170],[18,4,174,163],[3,10,145,175],[5,15,168,169],[99,175,159,175],[137,54,152,149],[9,1,185,166],[18,14,164,160],[0,0,168,161],[13,5,165,161],[1,2,153,162],[1,16,142,161],[2,10,175,164],[13,8,158,168],[9,7,167,158],[23,99,39,119],[51,56,73,130],[17,6,176,148],[16,10,185,157],[0,12,156,160],[3,10,155,170],[2,2,144,150],[4,13,177,166],[2,2,148,142],[7,15,165,168],[7,10,148,173],[17,6,182,164],[15,9,180,168],[1,17,177,171],[1,2,162,142],[3,3,147,161],[3,10,159,167],[5,1,180,174],[16,16,178,162],[11,1,164,168],[5,9,163,152],[7,15,168,169],[4,16,174,166],[15,17,158,173],[12,2,181,164],[14,12,170,176],[10,10,153,164],[11,16,183,156],[4,4,157,168],[15,4,155,148],[9,5,158,148],[18,6,165,159],[10,8,162,160],[10,1,177,154],[7,7,160,156],[3,16,151,160],[70,167,146,170],[5,5,163,161],[8,8,151,149],[17,1,182,142],[15,8,167,165],[133,134,137,144],[13,13,181,174],[6,13,152,166],[12,1,178,171],[59,86,168,105],[9,10,170,155],[94,135,136,156],[5,14,177,164],[2,0,169,162],[0,3,168,167],[0,4,149,154],[96,130,159,138],[6,0,169,157],[6,8,181,176],[9,1,183,149],[7,15,174,162],[15,13,185,171],[6,11,174,173],[3,17,150,157],[16,0,169,140],[17,8,157,166],[4,0,165,142],[14,3,175,159],[1,15,146,170],[13,11,161,172],[1,7,164,153],[8,10,174,166],[5,11,147,157],[81,28,161,172],[1,4,173,168],[9,0,180,140],[10,12,172,167],[13,9,164,159],[13,0,168,170],[0,8,184,163],[11,15,176,172],[16,14,183,173],[13,3,175,172],[3,1,156,146],[14,7,154,157],[12,2,168,159],[10,0,154,145],[9,0,170,165],[6,17,163,162],[161,99,181,103],[9,15,179,165],[13,6,161,158],[11,0,178,148],[0,8,162,157],[5,0,154,170],[3,17,146,170],[3,11,184,163],[5,11,176,172],[11,8,185,173],[7,0,148,153],[11,6,154,171],[1,14,178,162],[17,0,165,151],[10,6,166,165],[11,2,173,153],[24,145,120,176],[2,3,148,161],[12,7,154,148],[5,11,174,162],[8,9,156,171],[3,4,161,156],[13,11,165,158],[17,6,172,170],[14,11,172,172],[4,3,173,149],[4,7,156,176],[8,8,157,152],[4,4,175,153],[2,14,150,171],[12,11,185,153],[7,2,183,163],[46,157,149,173],[18,5,178,145],[5,5,151,176],[6,6,169,158],[9,3,173,169],[11,7,159,173],[15,7,162,162],[9,7,153,150],[13,10,153,172],[6,14,161,164],[147,50,168,52],[8,5,156,159],[71,90,122,110],[13,16,178,174],[5,5,175,146],[6,17,184,161],[16,8,166,154],[15,6,178,149],[11,3,173,159],[10,3,157,149],[1,6,145,154],[0,2,140,146],[13,1,183,156],[3,7,152,161],[8,0,168,164],[7,8,151,157],[10,7,151,154],[4,6,169,147],[6,13,176,164],[10,16,173,162],[15,13,169,164],[5,3,163,175],[13,16,183,175],[12,16,184,161],[3,4,155,148],[8,0,148,140],[12,5,158,165],[16,2,164,144],[11,0,159,172],[0,5,174,176],[18,0,165,171],[0,14,143,168],[10,10,185,155],[2,14,153,158],[5,4,185,149],[17,4,180,146],[8,2,165,162],[6,15,153,163],[16,13,183,168],[11,4,163,148],[11,13,170,172],[17,16,167,170],[11,12,161,153],[1,6,154,146],[3,4,146,168],[13,2,155,161],[2,10,176,166],[9,1,167,164],[41,49,49,104],[9,17,150,176],[11,11,176,176],[9,4,165,170],[171,155,171,164],[2,5,184,150],[13,10,157,153],[3,6,167,160],[17,2,168,157],[16,7,179,157],[11,2,167,166],[32,121,88,141],[11,17,179,171],[18,14,171,160],[1,12,181,154],[17,8,183,155],[128,115,142,120],[10,3,172,151],[0,11,169,162],[4,11,163,163],[4,3,152,146],[2,0,166,175],[10,5,172,168],[14,10,176,172],[0,13,180,158],[10,11,185,169],[8,12,180,172],[11,4,178,149],[16,4,176,145],[0,1,145,152],[6,17,148,162],[3,9,182,168],[15,4,160,171],[16,9,156,157],[11,5,167,170],[15,16,168,174],[6,3,167,163],[3,4,151,174],[13,11,179,151],[
12,14,178,173],[133,1,183,163],[5,6,166,163],[7,0,155,153],[10,10,185,165],[7,10,167,176],[1,6,156,174],[9,6,161,157],[65,132,167,150],[0,11,183,157],[8,6,183,175],[15,9,163,149],[16,1,177,176],[1,1,144,143],[11,5,155,175],[2,2,171,144],[0,1,180,174],[1,16,170,164],[8,7,175,147],[16,7,170,160],[6,12,164,166],[13,2,153,155],[9,16,172,157],[112,26,166,139],[8,1,167,167],[9,7,168,148],[13,3,167,164],[10,1,167,141],[16,1,156,165],[101,13,156,145],[14,14,168,170],[14,11,156,155],[16,13,163,162],[18,17,180,174],[3,9,152,168],[4,3,162,175],[15,17,157,166],[1,11,150,162],[6,1,160,154],[2,9,170,154],[4,3,167,155],[14,1,171,165],[2,0,179,140],[7,2,149,151],[11,17,175,164],[6,10,173,158],[39,13,136,176],[7,7,165,160],[0,10,156,173],[10,0,162,174],[1,4,168,155],[11,7,154,171],[17,9,177,175],[7,15,156,172],[4,2,181,148],[0,17,143,159],[10,12,166,157],[2,1,181,176],[65,23,153,100],[1,10,182,154],[18,8,164,175],[17,9,172,159],[0,8,162,159],[16,2,166,170],[12,6,180,161],[15,3,163,160],[12,0,171,140],[15,16,177,162],[12,16,171,160],[9,3,167,173],[8,150,173,151],[3,3,180,148],[1,5,177,173],[7,1,154,143],[3,10,163,152],[11,15,167,162],[4,1,184,175],[12,5,164,156],[13,17,182,164],[3,15,150,163],[5,13,149,164],[167,164,185,170],[14,7,157,167],[181,124,183,151],[1,10,175,155],[5,15,167,163],[9,3,165,170],[15,13,178,173],[17,17,167,158],[2,11,176,173],[16,7,179,170],[8,15,184,167],[2,2,167,174],[0,14,171,169],[13,5,170,160],[14,4,173,148],[4,6,169,170],[7,12,182,154],[17,10,182,159],[2,2,176,170],[4,14,145,166],[9,3,185,158],[4,17,168,169],[0,3,177,146],[18,3,183,176],[16,1,169,152],[3,15,173,163],[7,14,178,168],[13,15,172,171],[7,11,175,166],[2,11,184,159],[3,11,147,165],[12,0,162,174],[13,17,156,169],[1,8,145,159],[17,16,176,157],[4,8,167,169],[2,3,174,156],[7,1,168,158],[12,8,154,152],[16,12,169,157],[4,10,166,160],[12,6,172,173],[2,10,175,153],[7,1,179,150],[11,8,175,175],[9,2,163,171],[1,12,158,163],[14,10,160,174],[6,17,150,176],[4,2,181,171],[10,10,158,155],[0,1,157,176],[16,0,163,151],[9,6,162,146],[0,1,173,171],[6,54,64,105],[7,1,183,162],[0,3,145,167],[13,9,171,166],[5,5,172,165],[13,1,178,160],[13,2,167,172],[17,50,159,170],[8,0,158,159],[115,133,160,140],[4,13,183,154],[18,17,160,157],[18,10,169,170],[17,4,182,172],[7,6,171,149],[13,8,175,150],[18,2,158,153],[16,12,160,167],[14,5,167,145],[18,16,182,165],[0,1,160,161],[64,155,88,166],[11,2,165,144],[135,7,153,101],[10,4,184,176],[7,0,151,160],[147,111,160,176],[14,7,159,152],[161,10,163,95],[9,12,176,154],[8,11,157,170],[3,10,176,173],[4,0,177,150],[125,106,137,141],[8,2,169,151],[0,9,140,156],[9,4,155,149],[17,8,177,163],[5,8,164,166],[1,6,158,161],[3,13,144,171],[0,10,181,176],[9,5,164,158],[13,1,164,155],[16,0,161,161],[10,5,172,148],[17,3,185,153],[6,3,163,172],[4,0,180,142],[8,16,160,160],[6,14,158,176],[1,8,180,158],[101,88,181,175],[12,17,175,157],[1,67,69,143],[1,0,162,168],[6,17,183,159],[3,14,171,165],[12,7,161,167],[13,3,153,165],[1,7,161,156],[9,0,168,152],[16,5,176,163],[3,12,154,166],[18,3,183,173],[16,3,171,170],[8,3,171,167],[15,16,171,160],[3,7,169,156],[53,56,99,132],[12,8,170,150],[2,12,152,155],[6,5,166,153],[6,4,185,145],[1,2,168,172],[4,0,171,174],[8,4,160,170],[1,3,142,169],[7,12,176,175],[4,11,161,176],[5,15,160,155],[8,17,183,166],[14,0,166,167],[0,4,163,150],[13,11,161,157],[0,4,145,176],[14,0,177,147],[0,10,160,157],[16,12,176,153],[14,5,159,168],[18,4,166,155],[9,2,152,168],[9,0,171,161],[0,16,160,162],[1,8,177,155],[2,2,145,144],[4,2,181,157],[16,1,163,166],[8,0,175,165],[2,1,147,162],[3,5,181,147],[15,4,163,156],[11,15,15
7,169],[2,16,153,157],[1,4,160,164],[2,15,180,163],[11,10,184,166],[1,103,106,133],[1,5,180,148],[116,93,168,152],[5,0,157,146],[14,10,175,162],[11,2,176,165],[15,0,167,164],[1,5,146,145],[16,6,183,175],[12,17,180,157],[6,6,168,158],[3,10,178,167],[11,6,157,170],[4,5,156,159],[12,16,152,176],[7,9,147,167],[11,1,158,146],[12,1,181,144],[4,4,151,175],[15,13,159,158],[0,10,169,166],[14,4,172,170],[6,0,183,152],[9,14,155,157],[10,12,161,166],[9,4,171,151],[7,4,183,155],[7,1,153,175],[48,42,108,157],[173,122,179,143],[13,7,165,164],[10,5,180,150],[16,9,172,158],[0,12,173,166],[4,2,174,153],[0,2,142,164],[11,16,151,169],[6,5,166,149],[0,16,145,156],[15,9,178,160],[10,7,168,150],[8,8,168,170],[17,8,167,173],[17,6,178,155],[17,3,181,157],[2,2,181,152],[10,12,185,171],[11,14,166,157],[102,12,129,47],[15,9,161,157],[41,146,157,165],[11,3,160,149],[16,3,168,143],[10,3,167,161],[7,12,176,158],[45,107,169,120],[13,7,178,166],[1,9,165,171],[3,5,181,151],[17,8,179,157],[13,11,174,158],[3,8,146,172],[3,9,146,159],[4,4,166,150],[7,10,164,160],[11,1,165,160],[3,12,171,159],[34,93,119,117],[5,13,177,169],[2,11,164,163],[12,15,158,170],[67,84,150,111],[16,7,182,167],[9,2,175,155],[18,1,178,150],[8,3,165,155],[18,1,169,172],[9,6,160,176],[3,3,175,161],[1,16,143,160],[14,4,154,160],[4,8,167,165],[95,86,144,148],[18,7,166,152],[5,7,185,170],[16,16,176,169],[8,6,161,161],[12,2,152,170],[5,0,184,167],[16,7,161,171],[5,5,164,171],[8,3,180,176],[4,16,162,156],[0,12,164,163],[10,5,163,174],[1,11,161,154],[13,6,154,152],[11,0,174,175],[16,0,183,152],[14,9,163,172],[7,8,169,159],[12,17,166,170],[0,12,145,152],[17,4,163,166],[155,128,183,154],[3,3,175,162],[73,56,93,145],[16,3,164,146],[7,6,184,172],[9,6,175,150],[6,6,162,175],[10,1,154,147],[0,9,171,151],[12,10,157,166],[3,13,175,154],[25,124,138,154],[8,3,181,154],[15,4,160,158],[12,17,179,175],[5,14,179,154],[78,61,103,101],[4,1,150,172],[8,15,165,175],[15,1,182,176],[4,1,173,176],[9,10,151,156],[102,83,140,137],[15,4,165,162],[4,12,147,174],[3,4,166,173],[1,0,157,144],[42,15,55,120],[1,15,149,172],[17,5,172,167],[17,15,161,157],[3,16,150,156],[13,13,185,165],[15,9,173,157],[2,5,149,162],[16,4,177,176],[11,5,153,167],[17,2,182,170],[3,17,143,170],[12,2,185,150],[119,167,153,170],[16,11,162,173],[17,8,169,149],[6,3,183,168],[15,15,173,157],[14,1,163,172],[18,15,171,174],[10,15,166,171],[7,14,181,173],[2,14,166,165],[10,10,161,158],[0,13,180,170],[14,8,172,171],[0,6,152,175],[9,4,165,161],[11,8,175,155],[3,3,146,173],[16,9,183,151],[128,147,162,171],[1,0,143,158],[13,3,172,153],[14,16,179,171],[10,11,175,174],[0,10,179,157],[18,4,170,168],[8,5,181,165],[3,3,149,156],[30,93,67,123],[8,10,153,156],[15,11,167,161],[5,6,147,172],[4,9,148,156],[15,130,40,156],[12,1,164,171],[9,0,176,157],[15,10,185,162],[9,0,159,149],[0,4,183,174],[14,1,183,141],[3,3,150,152],[24,81,55,126],[14,2,184,167],[2,1,169,145],[0,5,156,161],[3,2,182,161],[0,0,141,147],[6,16,165,163],[14,0,177,155],[11,5,164,158],[13,4,155,170],[4,16,182,159],[4,16,179,172],[3,11,177,160],[18,4,160,166],[4,15,161,166],[10,13,173,174],[14,6,165,168],[11,8,169,171],[6,12,151,175],[113,128,125,153],[9,5,163,176],[9,5,158,174],[16,6,176,165],[14,14,163,154],[14,10,183,176],[7,15,162,167],[117,143,151,157],[4,3,148,173],[15,9,167,157],[7,1,185,144],[15,2,167,165],[13,12,176,172],[13,5,155,161],[2,8,173,171],[4,12,166,153],[18,3,165,169],[5,12,146,169],[14,16,157,166],[5,6,179,153],[9,9,175,158],[25,61,159,77],[3,9,166,163],[7,17,157,168],[7,11,170,174],[13,1,160,170],[8,9,170,160],[4,2,185,174],[180,164,181,164],[15,11,16
1,175],[15,9,160,155],[16,2,158,172],[9,5,162,160],[7,15,160,171],[0,1,173,149],[17,7,165,165],[2,17,148,175],[5,8,172,173],[7,7,153,165],[18,6,174,175],[11,2,156,162],[1,8,161,170],[12,12,175,165],[17,2,180,164],[3,7,148,155],[1,3,179,150],[4,12,159,155],[1,6,154,155],[9,10,158,165],[5,5,176,148],[5,6,182,169],[5,17,169,174],[15,4,182,157],[2,5,176,150],[2,14,161,163],[2,3,177,160],[11,16,160,168],[15,1,162,155],[0,6,154,168],[14,7,177,149],[0,3,170,146],[22,39,149,113],[15,4,163,150],[13,6,176,160],[10,3,167,157],[0,6,146,165],[146,36,184,136],[8,17,172,176],[12,15,165,170],[0,13,155,168],[4,8,160,153],[4,12,160,154],[1,5,179,151],[7,8,176,174],[12,3,160,148],[17,1,157,172],[104,103,130,108],[1,13,184,161],[8,13,182,158],[15,7,169,148],[15,11,177,154],[8,5,156,175],[5,1,180,174],[6,14,173,175],[4,15,178,159],[6,15,149,158],[14,12,180,171],[2,3,146,167],[9,4,152,166],[5,144,120,159],[10,3,175,163],[1,17,176,157],[17,7,176,154],[8,0,149,150],[1,3,177,175],[156,40,169,113],[1,0,177,163],[6,5,174,150],[5,12,177,164],[9,14,150,164],[0,11,180,167],[4,0,162,175],[18,7,185,151],[12,2,175,147],[2,6,151,169],[3,5,157,150],[158,155,180,170],[18,14,184,172],[16,15,174,163],[11,16,153,163],[12,10,156,165],[86,124,132,146],[17,7,162,169],[16,7,175,151],[7,2,151,159],[4,6,180,148],[13,0,174,146],[52,169,58,176],[87,124,114,126],[10,1,159,163],[6,16,179,159],[8,7,175,152],[1,7,163,150],[15,2,179,146],[5,2,175,172],[6,0,153,171],[12,16,154,171],[5,17,160,162],[5,14,154,154],[139,69,162,114],[9,5,178,171],[2,12,167,168],[6,5,155,165],[13,6,162,152],[7,17,178,166],[14,9,159,169],[2,6,176,151],[1,6,151,176],[92,155,104,168],[14,4,158,162],[125,152,169,164],[17,5,157,159],[1,5,144,149],[4,5,163,152],[13,2,184,152],[15,4,157,149],[7,4,176,174],[13,13,169,158],[1,7,169,148],[6,7,165,168],[5,5,151,156],[2,15,156,176],[3,0,171,159],[10,0,161,148],[9,10,166,163],[12,0,170,159],[15,6,173,173],[13,3,155,159],[12,6,184,158],[11,5,174,150],[1,4,150,168],[9,4,185,151],[17,2,170,153],[12,1,155,154],[11,2,151,144],[2,5,154,154],[0,0,167,176],[0,11,174,163],[6,14,174,157],[6,6,180,173],[18,6,164,174],[15,12,181,156],[10,4,170,167],[16,11,179,169],[3,8,152,150],[7,1,175,175],[8,14,151,171],[15,8,185,164],[18,2,172,162],[7,17,162,165],[17,14,181,163],[1,10,170,168],[96,133,141,158],[5,6,172,171],[16,13,178,161],[14,1,179,165],[8,11,164,172],[1,1,150,150],[2,2,156,152],[18,0,182,155],[7,5,156,150],[5,7,165,156],[6,10,150,162],[5,8,159,160],[12,0,154,176],[13,9,153,155],[12,6,162,152],[5,2,165,163],[17,14,182,172],[4,0,160,163],[3,9,171,173],[16,4,173,169],[13,10,163,158],[89,147,125,148],[8,14,177,161],[3,15,170,174],[17,12,171,167],[13,9,156,165],[4,3,185,160],[5,8,178,165],[4,4,162,176],[18,0,166,164],[17,0,175,170],[0,13,165,156],[17,9,163,175],[3,16,143,162],[0,7,170,160],[7,5,183,162],[91,16,179,131],[18,2,163,153],[0,3,163,157],[2,3,162,169],[17,4,159,174],[3,15,174,167],[13,13,164,170],[16,11,156,168],[18,15,170,165],[18,8,174,154],[3,1,162,147],[6,16,156,161],[18,0,175,176],[13,6,161,169],[7,15,175,155],[6,2,179,147],[16,14,157,169],[0,1,185,168],[6,0,165,159],[10,10,159,174],[5,1,172,159],[16,9,174,153],[2,16,185,156],[3,3,175,153],[10,8,154,167],[8,4,178,165],[3,7,170,166],[2,3,170,146],[10,15,156,167],[17,15,161,169],[16,1,178,170],[10,4,153,147],[10,3,171,168],[0,9,154,174],[10,8,176,156],[11,5,166,146],[9,4,154,170],[5,9,160,158],[10,6,150,164],[8,3,184,152],[45,151,129,161],[175,128,177,131],[4,16,168,159],[0,9,173,152],[3,9,143,172],[2,7,148,160],[19,8,156,166],[14,15,183,173],[2,5,175,149],[11,10,179,162],[10
,6,170,167],[13,17,168,176],[84,106,96,159],[9,16,168,167],[8,12,168,173],[2,6,152,156],[3,15,161,167],[3,6,161,160],[14,14,167,166],[10,10,153,160],[14,8,159,156],[9,11,150,169],[13,14,156,174],[8,10,183,176],[11,9,170,161],[14,17,180,175],[3,14,143,169],[15,3,179,154],[17,16,174,173],[12,6,156,165],[17,3,165,154],[1,2,180,156],[3,12,171,174],[12,8,164,171],[8,7,149,167],[16,8,166,169],[8,1,177,154],[7,10,152,176],[8,7,148,171],[15,4,155,149],[8,2,163,168],[7,2,156,149],[15,6,180,158],[8,1,181,151],[8,4,155,155],[17,2,159,176],[4,9,176,175],[9,0,181,140],[4,6,177,174],[11,3,168,155],[5,7,149,163],[5,8,151,166],[12,0,170,146],[16,1,176,147],[16,103,164,127],[18,16,174,169],[18,5,184,175],[14,10,172,152],[14,1,182,167],[4,2,167,161],[4,17,146,171],[12,11,164,171],[13,0,171,165],[4,6,162,165],[15,1,164,144],[14,0,154,148],[14,7,180,175],[17,1,176,168],[3,12,167,168],[16,3,161,146],[13,8,167,165],[0,9,182,153],[9,1,179,174],[17,0,182,157],[17,0,160,140],[68,77,69,139],[27,100,171,153],[11,11,159,161],[18,3,185,155],[5,15,150,155],[0,1,151,150],[14,1,177,148],[5,8,185,161],[160,176,165,176],[7,9,158,161],[7,17,180,173],[2,15,179,163],[12,1,171,147],[6,8,167,96],[1,8,178,150],[0,13,179,163],[112,135,154,164],[58,36,90,66],[54,68,122,138],[5,1,147,148],[18,12,163,171],[3,10,153,163],[18,8,180,176],[7,8,162,149],[9,15,160,157],[5,16,169,166],[14,16,159,170],[5,13,167,158],[14,11,163,156],[7,7,158,171],[8,10,153,163],[6,17,148,157],[11,10,176,154],[1,9,141,168],[11,7,185,163],[107,152,139,169],[14,10,175,153],[92,74,172,88],[111,56,138,130],[16,2,182,174],[12,8,159,169],[10,8,158,158],[11,14,159,170],[7,13,180,160],[9,9,161,159],[0,11,160,151],[15,3,183,174],[0,7,144,167],[11,3,155,146],[15,1,169,174],[13,17,177,175],[99,95,128,157],[11,15,184,157],[18,8,162,163],[2,1,147,176],[9,14,177,158],[13,17,182,169],[17,6,179,154],[2,14,144,164],[4,7,145,173],[13,13,179,162],[8,15,149,169],[13,3,165,166],[5,2,154,157],[16,7,182,164],[11,10,158,156],[6,5,153,146],[13,5,184,171],[14,12,157,172],[0,12,145,173],[0,15,172,174],[64,54,176,143],[1,11,183,161],[0,5,148,158],[3,4,160,150],[3,118,158,150],[12,2,166,156],[2,5,182,165],[7,6,176,149],[11,11,152,157],[0,1,148,157],[145,61,172,118],[0,15,162,173],[13,12,164,155],[9,1,178,143],[11,7,169,171],[10,3,173,165],[84,54,174,139],[14,5,182,172],[16,5,174,153],[5,11,162,162],[1,0,169,171],[7,11,162,160],[1,15,155,169],[0,12,156,167],[9,3,174,155],[7,4,184,157],[5,13,182,160],[7,6,147,170],[1,10,146,169],[8,12,154,155],[10,5,182,164],[9,0,174,157],[0,3,181,151],[13,2,174,152],[6,9,157,160],[31,3,119,169],[126,39,130,99],[16,10,165,162],[7,0,162,172],[13,10,163,165],[5,8,184,166],[1,11,158,159],[2,17,156,174],[6,6,154,152],[109,36,169,55],[17,4,162,150],[5,0,165,167],[12,2,158,153],[9,12,151,168],[0,4,185,149],[9,0,179,174],[2,11,154,174],[10,4,155,164],[16,3,177,163],[4,0,161,175],[15,5,167,150],[3,4,159,173],[9,11,176,151],[2,0,174,169],[3,8,166,162],[14,2,168,161],[0,3,167,176],[4,13,154,165],[15,5,182,148],[91,7,112,61],[9,1,150,168],[6,1,155,157],[12,7,152,154],[17,13,183,173],[8,6,149,167],[1,10,156,151],[12,12,161,155],[12,4,160,167],[12,5,179,155],[6,7,163,149],[9,6,164,173],[3,11,167,175],[14,13,172,162],[4,16,182,159],[11,17,174,168],[0,9,142,170],[15,3,167,170],[0,14,149,162],[18,7,171,162],[4,3,179,153],[175,168,185,169],[14,16,169,164],[2,7,185,164],[5,5,177,164],[11,0,173,166],[1,7,181,169],[5,7,167,168],[9,5,180,163],[174,146,180,167],[6,12,168,167],[10,7,171,172],[64,156,129,159],[5,15,182,173],[5,10,183,150],[11,3,184,155],[9,1,149,166],[13,0,161,143
],[156,69,174,115],[1,6,150,148],[6,15,165,175],[7,0,183,154],[16,17,159,164],[5,11,166,157],[15,6,181,160],[5,1,151,174],[5,5,164,151],[0,16,164,168],[3,17,163,158],[10,2,159,147],[15,14,166,155],[5,4,174,174],[14,6,167,173],[18,8,176,158],[6,1,177,149],[4,0,164,171],[8,5,167,173],[1,6,158,172],[10,6,156,150],[17,3,178,153],[16,1,182,156],[16,14,158,162],[15,6,161,170],[0,11,158,155],[16,2,182,174],[4,8,178,170],[2,17,177,160],[14,3,168,156],[5,1,184,161],[1,17,184,169],[13,7,177,162],[14,16,158,171],[158,29,172,99],[7,8,166,172],[6,13,154,154],[17,7,178,166],[1,0,150,156],[8,16,179,168],[7,17,184,172],[15,10,169,164],[15,4,173,151],[12,5,157,173],[1,5,159,161],[0,9,161,176],[14,11,171,165],[16,5,160,153],[4,12,144,175],[9,5,155,174],[8,4,168,153],[71,63,183,70],[4,11,179,155],[5,5,163,164],[3,4,174,158],[3,12,170,164],[8,13,153,164],[10,0,161,152],[6,4,180,149],[2,8,148,158],[4,8,156,175],[9,1,167,173],[4,1,174,175],[11,3,161,167],[16,13,157,164],[4,8,167,162],[9,8,169,148],[32,0,60,48],[14,5,159,171],[3,10,174,160],[10,3,171,176],[13,5,161,149],[11,9,163,166],[8,5,175,151],[13,6,179,161],[4,9,150,162],[0,0,154,170],[4,1,154,144],[106,114,162,132],[18,15,166,162],[5,7,174,170],[11,2,151,173],[3,16,150,168],[8,11,184,160],[16,5,176,148],[1,0,177,152],[8,5,170,173],[11,9,161,173],[4,10,172,175],[7,9,154,152],[18,4,171,144],[12,15,164,163],[83,122,140,160],[14,8,172,173],[15,2,184,172],[0,1,149,149],[4,82,162,138],[4,0,152,142],[9,15,159,174],[12,2,162,158],[11,15,185,171],[2,3,183,166],[18,8,183,149],[10,16,185,170],[7,11,176,175],[14,7,157,167],[8,2,164,170],[5,5,149,146],[0,4,185,155],[18,14,178,165],[2,0,154,153],[4,10,185,168],[0,4,152,146],[8,2,183,152],[14,10,180,160],[17,9,167,151],[18,14,177,174],[2,9,160,165],[18,3,176,143],[5,15,184,159],[33,23,176,172],[9,1,175,156],[11,10,185,160],[9,9,158,157],[4,3,185,164],[1,1,167,162],[3,3,162,171],[12,3,182,154],[10,11,171,155],[12,6,165,165],[12,2,181,149],[15,13,159,170],[1,4,183,170],[5,3,156,175],[3,7,185,158],[7,11,164,157],[0,15,180,173],[5,1,148,162],[16,14,156,158],[3,1,147,151],[61,85,177,92],[8,4,180,170],[9,49,120,161],[8,16,155,159],[14,10,156,152],[4,12,179,173],[74,144,159,160],[5,12,165,160],[3,9,168,165],[96,57,134,127],[1,14,170,158],[7,3,166,165],[15,6,181,165],[9,1,154,147],[16,13,179,168],[0,16,158,157],[5,6,175,158],[10,12,181,174],[10,10,179,161],[13,15,184,168],[7,0,185,156],[17,10,179,176],[7,6,155,153],[114,153,159,161],[5,11,170,162],[12,12,168,174],[3,8,151,152],[3,13,172,166],[0,10,184,164],[4,6,185,164],[10,7,176,162],[10,3,167,152],[11,8,179,166],[8,5,151,155],[13,6,162,170],[13,3,177,154],[2,5,182,151],[15,4,168,168],[113,161,161,175],[5,1,175,165],[3,3,168,145],[175,22,177,32],[64,155,95,160],[18,11,180,162],[10,1,168,176],[1,6,167,148],[2,5,164,173],[2,11,184,174],[18,8,167,154],[0,13,173,175],[11,13,154,172],[13,13,171,174],[14,17,165,162],[1,12,179,159],[11,10,166,176],[11,7,183,157],[14,2,173,148],[0,11,180,169],[12,4,167,158],[10,6,155,153],[4,1,154,171],[18,6,174,152],[10,12,165,167],[16,13,157,168],[6,13,151,161],[2,8,172,169],[4,13,185,153],[5,1,185,151],[9,2,161,169],[9,3,175,145],[13,1,177,170],[0,2,181,173],[2,1,157,147],[142,37,153,85],[15,4,176,164],[15,10,169,165],[2,0,150,176],[16,2,161,171],[2,8,174,150],[18,5,177,152],[9,14,151,175],[4,4,148,149],[7,11,156,153],[12,2,171,160],[2,1,179,168],[7,5,167,175],[16,1,160,144],[17,14,166,155],[145,27,182,153],[17,2,172,161],[12,1,168,150],[18,0,175,148],[15,4,159,150],[0,13,177,169],[9,10,167,159],[1,12,166,162],[114,113,170,134],[4,1,152,146],[3,2,14
9,168],[14,1,185,175],[6,10,168,163],[11,11,162,173],[56,17,134,46],[2,11,144,163],[11,1,183,149],[2,14,146,161],[0,14,180,155],[11,5,174,153],[1,13,152,170],[16,8,177,154],[120,168,183,173],[0,15,175,167],[10,9,182,149],[4,2,171,154],[7,3,148,161],[32,161,96,174],[2,7,143,174],[4,0,155,144],[2,16,169,171],[6,13,160,164],[161,42,170,78],[8,11,152,171],[12,8,168,149],[14,3,172,148],[12,9,182,165],[16,9,156,160],[6,14,159,169],[5,4,180,176],[106,61,110,120],[7,12,151,159],[10,10,168,172],[9,2,185,152],[11,3,161,150],[11,13,157,154],[2,14,177,164],[2,4,169,157],[2,0,182,173],[17,3,168,169],[89,36,145,175],[16,16,176,161],[0,15,182,163],[10,7,161,173],[9,13,150,166],[3,11,182,168],[14,1,179,175],[16,3,173,147],[9,13,158,168],[9,0,169,147],[17,17,166,162],[17,3,159,161],[5,16,172,166],[3,3,165,151],[18,14,169,175],[11,5,180,159],[9,12,155,152],[11,2,175,152],[7,17,164,162],[13,3,181,150],[2,9,163,155],[8,15,164,157],[164,170,167,175],[15,6,172,175],[12,14,183,164],[8,4,175,152],[17,16,182,161],[12,12,161,168],[6,2,183,164],[2,11,172,166],[12,16,167,161],[2,6,178,147],[16,0,164,159],[3,3,157,157],[3,2,143,174],[1,1,162,171],[10,10,160,165],[7,11,185,172],[0,6,179,172],[7,4,177,152],[14,14,157,162],[13,3,169,146],[6,10,168,171],[62,115,97,146],[0,11,162,170],[7,16,165,170],[1,15,148,167],[2,15,173,161],[7,0,172,153],[6,11,146,165],[11,16,158,174],[6,3,148,150],[18,17,173,157],[138,63,179,167],[6,16,159,172],[0,7,152,151],[1,1,173,156],[16,14,175,171],[8,0,183,151],[11,3,158,150],[0,2,183,153],[12,9,166,173],[134,120,155,171],[6,0,176,156],[5,2,151,164],[14,15,165,164],[3,5,152,146],[13,4,166,166],[13,15,182,158],[13,1,154,152],[1,5,179,175],[16,6,165,166],[13,0,155,147],[3,3,173,161],[1,16,185,172],[10,1,179,152],[13,103,48,156],[3,6,182,151],[0,3,166,167],[2,8,177,163],[117,34,163,156],[11,17,155,157],[17,9,184,159],[2,16,143,167],[1,8,168,175],[16,15,169,160],[4,10,164,158],[2,3,146,176],[15,14,155,173],[16,2,171,147],[15,17,159,174],[9,6,164,169],[5,8,151,175],[14,0,167,142],[16,3,164,144],[49,55,119,103],[178,117,179,142],[12,9,162,163],[1,14,153,167],[10,5,179,168],[1,8,147,149],[13,17,173,164],[5,11,146,160],[10,7,181,169],[16,11,160,160],[1,7,176,173],[16,7,159,156],[12,5,182,165],[8,8,180,169],[8,13,167,154],[11,4,152,152],[9,4,155,161],[7,13,152,156],[2,4,169,164],[9,7,152,155],[111,139,145,143],[10,8,153,156],[10,13,166,175],[2,9,149,159],[15,158,87,174],[7,1,161,148],[1,17,159,174],[6,3,146,150],[3,11,150,171],[43,17,53,49],[7,5,164,167],[156,9,184,55],[14,14,182,163],[45,48,46,133],[10,9,177,151],[12,1,156,173],[11,1,183,153],[16,8,167,159],[7,8,185,155],[67,36,70,117],[2,15,147,161],[12,7,171,156],[71,82,164,159],[18,1,161,161],[3,7,180,160],[0,4,158,159],[7,6,160,160],[1,5,175,166],[7,14,176,156],[5,9,185,149],[12,6,168,151],[13,57,24,62],[5,9,145,160],[3,0,153,148],[8,1,174,161],[9,15,150,164],[0,12,166,175],[3,6,157,171],[7,16,164,161],[1,25,25,27],[0,14,161,162],[2,0,161,162],[3,10,152,166],[2,15,156,174],[0,13,159,156],[1,12,183,157],[8,10,158,173],[36,83,184,167],[2,13,149,160],[3,17,173,169],[4,10,180,173],[1,8,169,170],[16,9,157,169],[14,3,162,172],[12,5,162,166],[3,7,154,153],[9,10,160,161],[13,16,167,171],[15,0,158,156],[18,4,158,161],[0,3,164,175],[13,1,178,161],[11,4,162,165],[0,11,151,175],[10,6,173,146],[11,7,174,156],[12,4,175,148],[2,13,142,174],[0,3,167,148],[10,5,164,161],[15,0,168,152],[7,1,151,161],[11,11,162,160],[11,11,167,175],[2,0,176,175],[11,15,174,157],[18,3,175,149],[4,16,172,168],[138,52,170,89],[7,4,163,145],[13,9,173,162],[4,3,149,168],[14,1,183,149],[3,
2,182,166],[12,8,180,171],[15,11,167,153],[3,6,161,154],[13,13,185,153],[3,13,143,176],[11,16,159,164],[16,10,179,150],[0,4,157,174],[13,17,181,161],[9,17,155,175],[8,8,154,165],[5,2,163,155],[11,9,154,155],[18,7,185,161],[4,1,175,156],[15,8,157,162],[63,138,94,154],[1,1,171,176],[13,1,164,141],[166,135,182,135],[1,16,176,172],[11,15,155,155],[12,13,170,170],[25,75,51,76],[8,2,184,166],[6,15,166,164],[2,2,160,157],[160,58,184,85],[0,2,160,173],[18,13,183,157],[7,12,151,154],[7,4,152,153],[14,1,173,144],[11,1,161,153],[6,4,159,150],[5,14,161,163],[16,10,162,165],[4,12,170,173],[8,12,177,174],[7,7,153,173],[13,1,168,146],[9,8,171,150],[5,0,172,149],[8,12,168,154],[12,4,152,154],[4,6,151,146],[18,5,185,161],[4,4,181,171],[14,7,173,149],[2,2,152,172],[10,2,167,154],[14,11,160,167],[3,10,156,171],[10,0,151,174],[10,5,179,173],[12,13,173,173],[16,1,180,170],[18,8,162,152],[13,5,167,152],[2,8,156,173],[16,1,164,158],[2,1,151,172],[0,16,184,167],[4,2,165,163],[1,0,155,165],[9,76,39,90],[18,6,177,150],[4,1,182,146],[1,1,157,155],[0,5,157,147],[49,96,140,131],[8,14,154,160],[8,0,180,145],[7,16,147,160],[11,13,170,157],[18,6,172,170],[8,16,161,166],[18,13,180,162],[17,1,169,162],[16,11,166,160],[18,16,159,158],[14,6,183,175],[4,10,159,174],[18,12,179,153],[7,9,173,173],[18,0,184,150],[15,4,170,151],[30,23,138,124],[11,16,182,167],[16,10,163,164],[1,14,145,169],[11,8,171,165],[11,13,176,174],[11,3,157,175],[2,0,147,145],[4,11,154,176],[15,0,172,153],[94,5,130,143],[13,13,172,164],[11,17,178,159],[8,2,154,142],[3,0,160,168],[11,13,162,155],[6,7,165,161],[17,2,173,156],[143,12,169,85],[12,6,178,174],[2,16,170,162],[8,13,159,156],[116,151,121,153],[11,6,160,159],[4,14,151,156],[17,3,175,160],[13,13,176,168],[4,14,150,167],[10,9,165,175],[12,5,162,176],[17,1,184,155],[2,15,144,172],[3,9,176,171],[11,5,182,163],[11,13,158,162],[1,1,176,168],[3,7,178,171],[2,0,144,166],[101,137,165,146],[3,14,154,167],[1,15,179,161],[4,2,171,157],[5,14,179,154],[12,2,176,153],[17,2,163,168],[7,17,155,174],[125,165,157,168],[7,4,155,153],[0,10,142,173],[10,14,163,161],[13,6,160,149],[5,16,170,169],[16,3,157,161],[14,15,173,161],[17,16,183,160],[151,174,151,176],[109,49,110,124],[174,93,177,94],[18,12,163,169],[9,8,161,153],[0,0,160,144],[13,10,174,153],[15,1,164,145],[11,7,175,169],[5,7,145,162],[12,2,153,164],[34,12,109,63],[4,11,179,163],[5,0,155,140],[6,7,164,147],[10,10,154,173],[12,2,180,173],[6,8,153,163],[0,7,150,169],[16,1,171,165],[17,3,179,144],[9,11,185,161],[17,5,183,170],[12,10,182,156],[5,3,181,148],[8,4,169,168],[13,0,166,153],[9,9,179,166],[5,7,157,165],[6,7,166,164],[6,1,163,149],[0,3,163,166],[10,4,166,163],[0,12,179,166],[5,8,185,160],[9,4,161,158],[7,8,165,151],[12,0,180,151],[15,13,166,155],[3,8,157,148],[2,12,156,164],[17,4,175,166],[7,8,163,171],[4,13,175,155],[115,104,169,125],[4,0,165,150],[9,12,152,162],[3,1,185,170],[0,4,182,164],[3,11,162,160],[13,1,173,155],[1,11,160,154],[6,2,147,148],[0,9,184,150],[90,44,157,174],[1,2,182,149],[5,13,184,159],[9,12,167,172],[12,8,178,164],[6,15,164,165],[3,8,182,175],[11,15,158,165],[6,3,174,176],[0,6,143,158],[0,7,152,166],[6,12,172,167],[18,13,161,174],[4,11,175,159],[6,0,162,140],[0,3,156,174],[16,12,159,169],[8,16,168,167],[6,8,163,171],[9,0,180,151],[16,9,182,170],[17,14,180,157],[106,99,171,169],[10,168,23,173],[6,6,176,174],[103,17,103,39],[13,4,167,170],[55,105,148,168],[110,11,173,57],[4,16,151,165],[17,3,170,174],[5,0,152,168],[8,15,159,174],[10,14,181,163],[9,8,179,158],[6,3,166,148],[78,58,183,89],[0,2,178,173],[0,2,180,167],[8,11,153,167],[0,7,157,15
7],[0,2,175,159],[6,14,170,171],[7,13,170,170],[17,10,162,167],[22,6,52,171],[176,95,182,159],[134,95,163,107],[11,0,159,176],[8,4,179,147],[0,16,146,175],[81,26,91,35],[8,6,149,170],[10,10,167,173],[14,7,158,150],[14,5,156,148],[2,0,156,159],[1,7,146,155],[27,92,128,119],[13,12,157,176],[98,117,180,124],[11,0,172,174],[7,2,174,159],[17,7,176,149],[1,17,160,173],[18,7,178,154],[5,9,157,173],[17,3,159,158],[7,14,171,164],[11,0,176,163],[5,7,155,165],[7,3,169,152],[9,14,157,164],[5,6,168,153],[1,6,185,171],[2,16,177,162],[14,10,164,175],[7,10,164,165],[14,14,166,166],[1,3,176,175],[0,4,161,156],[7,16,172,171],[8,8,163,148],[8,6,177,157],[18,15,160,166],[3,11,181,173],[18,4,163,174],[18,17,164,176],[13,7,169,155],[46,168,131,173],[12,0,184,160],[8,6,159,151],[62,104,141,124],[16,1,164,158],[0,0,171,175],[2,1,172,173],[1,11,179,154],[5,14,185,155],[15,4,167,176],[5,5,177,151],[14,13,181,154],[12,4,172,164],[2,12,178,152],[1,16,184,158],[16,10,159,156],[3,2,181,157],[13,15,169,172],[9,6,160,163],[0,0,142,172],[5,11,148,162],[0,3,152,160],[4,14,162,156],[13,0,156,159],[6,15,172,172],[16,10,179,154],[13,17,155,163],[86,131,126,163],[95,136,163,169],[14,9,184,153],[9,1,175,162],[18,0,178,161],[15,2,173,172],[14,0,185,160],[3,4,143,153],[1,15,162,160],[5,15,183,164],[2,3,160,144],[10,4,162,164],[3,4,153,144],[13,11,164,165],[16,5,171,152],[175,45,181,176],[10,5,157,167],[13,16,178,167],[1,13,142,166],[5,10,149,155],[4,13,146,170],[2,3,179,160],[17,10,179,154],[13,3,180,167],[7,5,157,155],[14,2,164,155],[7,13,152,168],[8,9,173,171],[6,5,173,171],[16,13,160,172],[7,4,149,163],[1,12,153,173],[4,6,180,153],[18,5,181,146],[9,15,185,163],[2,15,173,162],[11,7,158,170],[17,5,174,148],[2,4,157,156],[15,7,175,167],[14,7,180,152],[10,2,184,171],[0,3,157,161],[3,13,181,169],[17,90,153,91],[0,7,163,176],[6,15,155,155],[16,9,167,149],[0,8,162,151],[6,4,153,155],[4,3,160,161],[3,3,152,158],[12,9,161,168],[15,7,184,168],[10,2,168,168],[3,16,177,172],[7,4,180,175],[1,0,180,160],[18,4,172,147],[20,91,179,110],[165,134,166,139],[18,4,176,150],[10,11,181,166],[12,3,175,164],[69,28,127,140],[0,10,152,168],[0,1,175,159],[8,14,167,167],[6,7,166,170],[10,17,171,168],[2,16,182,168],[3,5,172,152],[0,8,176,159],[126,126,140,128],[14,11,159,175],[8,14,183,163],[9,11,157,176],[8,6,181,154],[45,129,125,170],[6,5,181,175],[11,9,165,172],[11,16,166,162],[18,12,175,161],[7,15,162,159],[3,12,157,161],[0,9,180,158],[9,1,179,160],[10,148,146,176],[17,3,182,171],[10,67,171,165],[3,6,163,152],[4,14,174,157],[6,13,165,171],[17,13,183,165],[7,8,149,170],[5,6,168,157],[17,2,165,169],[5,7,182,176],[10,4,173,173],[18,17,171,175],[18,8,181,168],[13,9,157,176],[5,4,158,151],[75,55,112,87],[11,3,176,157],[12,14,178,164],[10,8,185,175],[18,3,178,145],[103,150,177,154],[15,1,171,156],[5,10,181,163],[8,11,160,173],[0,9,148,167],[6,4,168,168],[1,5,159,173],[12,3,173,144],[15,3,161,168],[13,2,177,156],[18,9,184,163],[1,5,160,146],[4,1,170,141],[66,87,69,117],[6,9,175,159],[9,0,175,151],[16,3,177,155],[13,1,155,167],[89,162,106,175],[67,20,106,117],[10,2,183,157],[2,2,175,153],[18,16,183,158],[6,11,167,155],[6,1,169,166],[14,9,181,160],[6,15,161,171],[6,9,168,151],[17,1,165,144],[18,5,160,152],[100,158,177,173],[5,1,150,151],[15,17,167,168],[119,133,181,146],[10,9,166,169],[6,0,148,172],[2,17,172,176],[13,1,157,149],[6,14,154,158],[9,3,150,168],[11,5,185,176],[1,4,172,171],[127,155,168,160],[12,7,181,172],[55,142,71,159],[18,9,166,155],[5,5,156,157],[11,12,169,156],[6,2,169,176],[17,3,182,146],[10,16,175,171],[7,11,165,154],[0,1,172,142],[18,5,175,16
4],[15,1,175,157],[3,9,150,169],[11,9,160,167],[15,7,183,155],[5,3,154,165],[9,4,158,159],[1,0,180,140],[18,1,160,170],[2,12,160,152],[3,13,147,171],[6,7,172,163],[18,14,180,163],[0,7,176,154],[5,14,182,161],[9,12,178,156],[0,7,163,173],[13,10,178,158],[13,11,166,153],[7,3,165,155],[14,0,184,167],[13,5,154,147],[7,14,153,175],[16,3,156,161],[3,3,149,147],[7,11,179,154],[6,3,152,169],[0,3,174,148],[8,3,159,174],[16,7,185,154],[1,7,145,166],[15,6,162,149],[6,0,153,149],[10,3,179,164],[18,10,174,171],[13,12,155,167],[6,1,181,167],[7,0,153,175],[17,11,172,174],[86,57,134,107],[1,11,159,154],[9,12,155,161],[2,11,176,168],[10,1,152,155],[8,0,159,165],[8,13,157,155],[9,9,166,174],[14,8,166,153],[1,13,154,158],[33,33,43,71],[1,13,181,153],[6,10,174,169],[132,54,133,108],[10,0,152,163],[6,4,161,154],[10,14,176,164],[9,0,177,148],[14,16,159,166],[15,4,177,161],[11,14,162,160],[142,123,180,123],[3,14,167,164],[8,10,160,158],[9,13,152,157],[9,1,154,167],[7,3,148,154],[7,12,174,171],[2,3,161,174],[2,13,182,176],[1,13,159,163],[16,7,156,170],[11,2,174,145],[13,11,155,170],[15,9,172,164],[10,0,169,175],[4,11,171,175],[5,14,162,173],[14,12,165,154],[3,17,159,163],[6,0,177,151],[145,70,168,132],[8,8,184,163],[10,11,168,171],[4,97,126,143],[1,0,153,157],[1,0,184,156],[12,8,185,158],[6,13,151,159],[1,13,149,169],[7,9,176,169],[1,15,178,158],[13,17,161,170],[10,12,170,164],[1,4,146,165],[8,14,152,154],[8,5,177,167],[3,12,179,165],[2,6,156,157],[9,0,184,165],[2,8,182,171],[9,6,157,153],[16,17,167,165],[4,1,172,169],[7,11,148,160],[18,14,184,166],[17,15,163,159],[1,0,173,173],[14,11,155,161],[139,172,149,172],[1,14,169,154],[79,135,82,137],[14,13,155,153],[9,10,182,154],[12,4,173,168],[0,5,181,145],[8,8,183,161],[16,8,159,153],[6,14,150,166],[0,4,158,146],[0,7,171,152],[3,10,175,165],[3,1,162,175],[7,1,177,175],[1,4,183,144],[13,5,164,165],[1,9,176,159],[14,12,163,158],[7,9,151,153],[6,2,178,143],[2,7,162,153],[6,2,182,144],[9,11,164,166],[10,6,185,167],[63,110,97,133],[3,3,150,160],[0,3,156,145],[12,5,180,160],[7,12,169,160],[6,0,179,163],[0,15,168,156],[6,4,161,171],[8,2,180,174],[10,5,161,164],[2,14,169,176],[6,12,159,166],[5,11,172,165],[22,5,163,172],[4,2,176,142],[0,12,143,168],[12,14,176,169],[8,7,174,148],[14,1,157,164],[4,15,185,156],[127,75,130,166],[16,14,181,163],[6,4,179,162],[2,16,154,168],[63,69,75,116],[167,21,177,50],[5,8,177,162],[10,11,185,157],[4,11,155,154],[13,1,171,161],[5,15,173,158],[1,0,180,152],[12,7,171,170],[2,11,165,175],[46,109,75,136],[2,15,175,173],[11,13,180,154],[18,9,177,154],[7,15,156,172],[3,0,171,154],[4,5,172,155],[156,21,166,165],[2,17,150,163],[12,6,155,155],[5,17,147,163],[3,7,174,169],[2,5,168,159],[11,2,160,142],[24,22,174,33],[11,10,171,167],[16,15,174,166],[12,8,173,165],[6,1,184,165],[16,4,158,154],[110,104,136,121],[5,4,150,165],[12,0,181,162],[6,14,168,156],[3,1,161,148],[3,3,172,163],[12,11,174,161],[13,9,162,173],[2,7,184,155],[4,13,178,154],[18,2,181,174],[7,15,168,162],[14,9,170,161],[1,11,158,162],[13,8,166,154],[12,2,157,170],[13,6,175,163],[150,141,150,147],[10,12,157,155],[14,10,165,157],[13,0,179,142],[8,14,176,165],[10,2,174,142],[151,66,158,158],[7,16,164,156],[4,1,177,166],[13,3,171,163],[5,11,167,156],[13,14,182,169],[8,1,176,147],[4,14,170,169],[1,14,160,173],[12,7,165,168],[4,6,174,151],[5,1,164,171],[15,0,177,159],[59,88,80,132],[3,9,165,163],[9,9,173,174],[7,13,148,163],[19,18,129,115],[8,3,173,152],[17,8,157,174],[7,3,169,164],[1,1,144,145],[6,13,154,159],[8,11,175,158],[6,10,149,155],[2,8,184,149],[16,16,180,173],[8,13,163,165],[15,0,165,165],[
0,7,185,168],[18,6,185,175],[0,17,140,176],[4,8,180,151],[3,17,172,162],[3,9,161,149],[13,19,31,140],[41,80,138,105],[5,14,169,160],[16,7,159,164],[13,15,168,172],[2,13,177,154],[10,0,170,172],[10,5,185,171],[4,15,149,169],[1,9,185,149],[8,3,150,148],[2,8,173,176],[15,3,175,167],[1,11,178,175],[6,17,176,176],[16,9,185,167],[122,136,150,174],[12,4,154,166],[13,0,167,168],[6,6,161,166],[18,15,184,169],[11,1,151,159],[5,0,177,175],[0,17,147,158],[5,11,156,162],[9,9,171,151],[8,5,185,150],[9,12,171,163],[12,1,178,174],[15,13,173,173],[14,6,172,171],[9,16,171,156],[16,16,161,159],[12,8,171,174],[0,1,148,172],[1,12,168,162],[2,11,142,175],[11,11,185,152],[4,2,168,150],[1,10,160,166],[6,8,183,149],[6,6,183,170],[16,13,183,163],[4,12,179,162],[7,4,42,24],[12,14,181,154],[17,14,185,154],[10,0,164,165],[12,3,171,147],[18,1,183,155],[16,11,156,154],[16,7,183,162],[5,0,161,158],[3,13,160,158],[9,16,158,162],[6,14,147,158],[13,9,157,175],[10,5,153,166],[6,4,172,174],[9,13,184,160],[6,9,168,170],[7,3,165,145],[116,28,145,29],[129,10,135,21],[5,0,166,150],[18,6,162,151],[13,0,181,153],[103,124,147,175],[7,11,172,152],[2,10,153,152],[8,13,154,175],[122,69,144,110],[11,6,182,152],[6,9,149,155],[7,5,169,175],[1,17,146,167],[12,14,177,176],[10,7,166,163],[166,64,184,137],[4,16,144,173],[10,13,157,175],[17,9,171,164],[15,3,157,157],[3,9,174,150],[1,7,184,172],[7,14,181,172],[71,4,149,54],[18,10,161,164],[14,10,159,153],[18,2,169,154],[3,16,154,159],[7,10,181,170],[3,1,156,144],[3,12,180,165],[14,0,156,146],[6,0,162,159],[18,1,161,159],[13,14,181,163],[116,115,169,125],[178,7,179,161],[17,12,183,160],[10,12,156,166],[7,1,185,162],[2,3,152,156],[8,8,161,149],[4,9,177,173],[1,1,155,167],[13,7,155,166],[0,1,170,175],[11,2,161,167],[16,9,177,158],[2,12,174,169],[5,5,175,174],[7,7,166,158],[1,14,158,174],[2,9,154,165],[17,0,160,166],[100,15,137,175],[60,39,170,68],[9,14,183,167],[50,88,60,99],[15,6,161,168],[0,11,160,176],[6,1,156,172],[2,1,165,173],[1,4,154,157],[17,9,170,153],[3,14,148,174],[9,2,181,143],[0,1,149,162],[5,2,151,173],[1,3,144,145],[12,5,153,149],[5,8,179,161],[13,1,163,142],[15,1,169,167],[13,8,177,172],[6,0,178,161],[15,9,158,150],[15,1,157,143],[120,2,176,165],[14,8,174,157],[10,4,170,151],[3,9,158,172],[14,0,169,172],[12,2,184,163],[13,1,177,147],[3,11,166,163],[6,5,168,160],[9,4,167,151],[5,13,145,170],[6,16,168,168],[10,4,182,163],[72,84,176,169],[4,3,146,150],[4,15,180,156],[10,17,184,173],[9,14,182,156],[7,1,177,174],[16,0,164,171],[0,14,153,154],[3,14,172,171],[1,11,178,176],[7,3,159,166],[6,16,169,172],[16,8,174,167],[3,2,143,175],[0,9,169,150],[13,6,164,175],[14,1,159,172],[10,14,161,154],[139,93,184,100],[11,1,176,162],[3,12,169,152],[7,3,149,151],[12,7,181,149],[6,9,155,157],[11,6,164,150],[18,8,169,165],[9,2,177,145],[10,7,156,151],[18,158,185,175],[36,46,114,94],[5,0,150,171],[2,2,145,154],[9,13,178,173],[5,2,176,163],[11,4,172,170],[13,1,162,160],[16,8,178,167],[11,3,161,156],[10,4,155,159],[13,2,169,173],[16,152,156,167],[10,2,173,161],[7,3,174,169],[4,1,156,149],[5,0,161,149],[7,4,162,172],[64,150,111,170],[15,3,160,171],[174,138,175,172],[15,4,179,146],[11,1,176,154],[5,1,171,155],[16,2,174,147],[12,3,157,151],[5,1,169,145],[2,13,149,173],[3,11,150,154],[13,3,165,164],[14,4,179,169],[71,76,168,113],[0,17,176,170],[8,0,151,141],[1,8,172,173],[14,4,157,164],[17,1,164,171],[15,8,158,170],[4,7,167,155],[5,2,160,175],[16,2,162,142],[45,105,145,167],[2,7,165,159],[13,17,181,164],[40,33,58,69],[6,15,150,160],[2,5,157,164],[5,2,145,154],[0,2,160,151],[1,2,170,169],[6,6,182,157],[17,5,180,1
69],[7,3,165,164],[18,1,168,158],[10,5,175,165],[0,6,142,146],[11,4,164,144],[2,9,180,172],[6,9,150,173],[134,112,174,112],[0,8,161,164],[17,14,170,163],[4,0,165,155],[12,2,181,173],[5,6,170,169],[7,10,153,160],[3,9,143,167],[15,13,165,175],[7,11,156,173],[2,0,174,144],[0,7,172,166],[3,14,147,166],[15,12,167,165],[10,2,151,159],[2,2,180,142],[9,0,157,152],[6,14,180,155],[12,16,163,164],[1,17,153,160],[0,5,176,170],[6,12,163,170],[7,12,164,154],[11,2,159,172],[3,0,177,171],[18,0,166,148],[10,5,160,159],[15,4,184,161],[18,4,169,157],[83,137,130,156],[0,4,163,176],[5,0,167,145],[4,8,161,173],[171,123,175,130],[11,14,167,163],[15,14,168,174],[8,4,169,156],[73,8,126,89],[2,3,148,147],[15,7,178,159],[1,4,158,169],[1,7,148,150],[5,12,153,163],[4,12,177,170],[12,8,156,166],[4,0,157,149],[8,0,155,160],[13,10,182,157],[1,8,168,150],[6,15,164,171],[7,10,162,160],[13,9,173,163],[0,4,157,146],[11,9,153,172],[16,16,157,161],[11,8,169,152],[10,2,170,146],[10,2,175,145],[6,4,179,172],[150,159,160,172],[3,8,167,161],[11,11,168,171],[2,16,176,167],[0,1,178,147],[0,6,172,155],[1,9,146,151],[169,85,184,153],[5,1,148,157],[3,3,183,150],[12,13,173,160],[172,28,174,33],[17,13,167,164],[8,5,157,159],[14,0,154,165],[11,0,168,174],[2,10,155,175],[4,0,181,150],[122,21,181,93],[6,4,148,168],[2,6,163,175],[11,10,170,167],[11,4,162,160],[16,1,165,141],[0,4,153,163],[17,17,171,159],[2,10,147,156],[0,1,168,158],[14,10,176,175],[17,12,181,176],[61,76,69,163],[12,4,176,145],[5,7,156,176],[11,0,159,140],[0,13,172,157],[12,12,171,159],[125,159,136,165],[10,10,154,164],[139,161,143,174],[16,1,181,158],[9,0,171,152],[7,10,162,151],[7,5,182,148],[11,6,171,147],[10,17,182,162],[13,0,167,170],[113,34,160,111],[115,19,172,83],[11,3,183,144],[16,2,179,157],[9,0,151,155],[113,4,172,153],[5,10,168,169],[18,5,181,160],[4,5,152,159],[1,1,175,168],[4,64,157,127],[3,7,147,153],[6,5,183,169],[158,66,180,118],[4,158,144,158],[7,10,183,172],[12,4,182,157],[16,3,185,152],[10,13,160,161],[5,12,160,158],[9,12,173,167],[10,16,155,173],[9,7,164,162],[4,1,144,155],[4,13,167,173],[11,2,158,147],[3,4,182,171],[2,5,166,166],[17,2,184,163],[3,7,149,176],[18,7,161,170],[10,9,178,162],[12,4,173,173],[3,6,148,161],[17,16,163,164],[12,2,179,144],[9,14,172,172],[16,48,180,137],[5,6,168,151],[10,12,153,164],[2,9,148,162],[9,3,181,170],[6,13,174,164],[6,14,164,168],[7,3,157,159],[18,5,173,176],[2,9,158,165],[5,9,158,173],[8,5,162,150],[12,10,172,154],[16,11,173,161],[147,102,181,161],[0,11,175,171],[15,15,161,171],[17,1,176,176],[43,83,85,161],[5,7,179,155],[12,5,181,173],[13,1,153,165],[4,1,148,155],[0,16,165,174],[13,12,158,160],[1,13,155,157],[17,6,172,162],[6,5,168,148],[5,11,148,154],[9,16,152,168],[10,10,160,173],[15,3,170,168],[8,6,149,175],[8,14,148,176],[8,4,164,174],[3,14,150,155],[1,1,148,153],[6,8,183,171],[0,14,178,172],[100,64,112,162],[17,1,168,166],[12,0,157,149],[9,8,164,175],[7,1,148,174],[7,11,182,160],[9,0,165,168],[8,8,175,170],[6,14,155,169],[15,16,184,173],[12,6,179,173],[0,13,167,156],[6,7,154,159],[0,0,161,158],[10,1,182,152],[10,15,155,175],[2,17,160,165],[184,113,185,167],[30,117,76,155],[15,17,170,162],[18,3,184,154],[1,7,183,165],[10,16,161,176],[4,3,166,172],[18,0,158,169],[16,4,180,149],[14,10,154,158],[4,12,162,159],[18,3,180,155],[5,2,182,167],[11,11,166,163],[0,10,146,170],[185,157,185,173],[12,11,170,161],[18,4,165,172],[8,3,150,147],[9,1,153,156],[11,2,160,165],[140,106,171,170],[7,10,157,170],[9,14,166,163],[7,11,168,176],[1,0,169,163],[16,11,161,164],[154,164,164,175],[13,14,171,161],[1,17,156,168],[7,2,164,163],[6,5,162
,155],[15,7,160,152],[6,7,172,158],[1,7,165,152],[10,1,167,156],[17,4,166,170],[167,9,171,84],[9,0,180,173],[6,8,173,148],[3,7,184,154],[9,17,163,163],[15,0,184,146],[7,7,174,155],[2,16,170,171],[14,1,172,147],[11,5,177,146],[136,140,158,151],[6,12,155,174],[11,0,154,176],[3,13,153,154],[3,7,155,175],[14,0,155,166],[7,9,181,167],[11,4,177,152],[14,9,180,168],[4,2,169,176],[9,0,153,159],[18,0,178,163],[10,5,168,168],[4,0,173,158],[17,2,159,160],[17,12,185,162],[7,9,185,175],[18,15,161,168],[8,3,166,162],[13,3,167,149],[0,9,172,172],[12,9,183,172],[17,3,180,144],[7,17,172,161],[17,1,168,169],[25,32,131,152],[169,26,172,89],[0,10,146,84],[3,6,154,156],[1,11,178,165],[6,2,179,148],[8,1,166,142],[10,11,181,167],[13,17,163,161],[9,8,181,162],[5,1,177,174],[12,6,181,150],[2,12,172,162],[76,0,150,33],[4,15,165,172],[9,2,160,173],[12,0,159,175],[7,1,161,157],[18,3,164,155],[12,1,157,172],[3,17,171,175],[11,3,162,143],[11,14,181,169],[5,14,180,159],[9,8,159,172],[18,9,165,167],[12,5,183,157],[5,22,73,73],[12,5,172,154],[7,4,168,159],[17,1,160,168],[15,11,157,161],[1,3,147,162],[12,17,168,175],[6,9,159,165],[81,89,104,117],[5,3,147,144],[8,9,157,153],[171,58,171,168],[3,2,158,152],[8,15,172,165],[9,17,149,157],[172,14,177,20],[11,13,169,156],[18,5,159,167],[11,0,169,150],[12,6,182,173],[9,16,179,170],[1,13,149,170],[12,0,156,140],[11,16,168,160],[3,1,180,168],[6,7,185,163],[9,5,174,169],[9,6,180,146],[4,2,144,172],[5,17,174,161],[10,8,182,161],[10,15,158,172],[5,3,154,154],[1,9,142,167],[6,4,170,145],[120,31,166,129],[6,13,157,158],[45,105,57,137],[14,13,185,163],[7,12,150,157],[7,2,160,176],[0,3,147,168],[15,7,161,163],[12,17,156,170],[7,2,156,158],[95,141,161,161],[17,7,166,172],[17,0,168,148],[12,14,175,158],[18,1,172,173],[156,41,169,166],[61,167,144,176],[3,3,184,151],[112,122,114,157],[7,17,174,171],[12,6,171,148],[10,3,155,154],[2,2,165,164],[18,2,159,151],[16,10,185,157],[17,3,160,152],[17,4,172,170],[10,5,181,146],[1,13,148,157],[5,2,149,176],[11,0,174,145],[0,18,154,176],[5,12,177,157],[5,1,150,143],[16,3,160,161],[3,2,170,172],[14,1,173,166],[11,9,184,169],[9,6,167,150],[15,1,167,165],[11,14,170,175],[2,9,159,176],[7,17,182,161],[5,7,180,171],[10,5,167,159],[1,4,176,159],[17,7,165,159],[16,15,170,158],[2,14,152,155],[9,16,160,159],[2,5,150,171],[7,5,154,156],[64,52,162,145],[10,3,153,167],[15,7,164,165],[0,6,151,175],[4,2,150,165],[4,11,167,156],[16,1,171,145],[6,11,174,153],[5,3,152,173],[3,8,179,154],[11,3,153,157],[7,0,160,140],[0,4,167,155],[1,15,154,164],[10,10,171,152],[17,5,164,145],[15,4,173,156],[18,1,173,165],[8,6,166,163],[0,1,170,171],[5,8,150,168],[18,11,168,171],[15,10,184,166],[13,118,31,118],[17,4,184,154],[9,7,156,165],[2,3,165,167],[6,7,176,153],[11,3,158,172],[7,4,171,168],[162,15,182,71],[1,0,185,174],[14,4,156,160],[0,8,176,154],[17,8,163,168],[4,11,145,154],[7,15,171,161],[13,13,183,171],[3,0,157,159],[140,53,148,109],[7,8,159,163],[7,3,174,163],[11,16,159,170],[9,12,149,155],[4,0,151,155],[47,48,113,150],[14,2,185,168],[13,12,169,154],[15,6,180,170],[9,13,153,154],[0,11,142,168],[0,10,170,166],[11,7,171,156],[16,14,167,166],[2,7,169,174],[1,11,149,153],[15,5,168,161],[10,1,185,158],[2,0,150,148],[12,10,152,165],[14,7,157,174],[8,3,152,148],[16,16,166,164],[0,5,182,157],[56,113,105,156],[14,0,154,156],[0,8,155,149],[14,10,176,169],[1,9,185,153],[12,16,160,174],[10,1,173,153],[47,113,151,132],[8,12,178,175],[164,120,166,141],[8,14,177,160],[0,2,169,172],[9,11,158,163],[18,16,183,165],[2,10,167,175],[3,0,150,163],[128,61,147,85],[75,3,100,73],[48,26,71,124],[104,21,165,7
2],[9,4,167,145],[11,8,151,174],[11,13,176,154],[9,17,173,161],[17,13,164,171],[16,4,181,147],[4,3,185,168],[5,10,177,166],[3,11,154,166],[1,1,169,170],[4,47,39,175],[133,155,140,167],[5,7,175,151],[1,0,141,150],[5,4,180,160],[16,1,181,165],[3,6,175,153],[2,2,158,159],[18,14,177,161],[15,10,157,153],[6,12,147,165],[0,4,177,150],[11,15,170,161],[7,6,155,147],[11,2,182,145],[11,0,162,168],[4,7,151,173],[10,7,170,169],[17,14,175,172],[9,3,172,165],[1,1,161,172],[14,1,164,158],[16,15,177,162],[2,3,145,173],[70,20,92,154],[13,16,174,176],[11,0,164,144],[17,2,163,159],[2,4,159,169],[5,11,178,163],[11,17,169,165],[8,5,170,165],[10,1,169,158],[3,5,173,166],[17,9,167,174],[9,0,185,176],[13,1,173,152],[7,9,170,151],[18,4,182,174],[12,0,183,174],[18,0,176,163],[4,2,146,145],[1,5,170,163],[47,173,104,174],[10,12,169,160],[5,9,164,170],[16,15,172,172],[3,3,171,154],[16,9,160,166],[0,0,142,150],[3,7,182,166],[13,14,169,160],[11,0,170,158],[8,2,185,152],[5,9,181,165],[0,1,142,152],[0,4,178,175],[1,9,170,149],[18,2,172,176],[10,17,178,164],[2,2,175,155],[14,0,166,140],[10,1,175,175],[4,14,183,161],[15,11,177,164],[5,3,168,176],[9,5,177,174],[6,5,163,156],[13,4,162,145],[0,3,153,153],[15,17,164,173],[1,14,154,159],[16,14,180,158],[0,11,155,175],[9,3,173,173],[5,4,146,145],[1,7,151,154],[13,16,168,161],[15,11,168,168],[3,16,145,172],[8,16,157,157],[4,12,171,154],[9,0,180,160],[14,11,182,172],[9,17,152,162],[15,8,167,176],[3,13,143,170],[15,9,181,163],[3,16,180,163],[16,4,167,169],[1,11,181,160],[8,9,168,164],[4,3,153,173],[175,9,175,148],[13,4,164,160],[16,6,158,147],[10,9,182,175],[8,4,157,160],[3,2,150,170],[8,2,167,152],[15,9,184,170],[15,4,155,165],[2,13,145,169],[151,129,169,142],[3,3,181,153],[16,6,178,153],[12,3,153,151],[168,81,181,115],[12,8,174,162],[15,17,185,160],[15,9,172,175],[16,4,171,160],[13,10,178,166],[17,6,170,157],[0,10,182,152],[0,2,147,153],[4,2,155,146],[13,3,183,163],[3,9,172,165],[1,0,142,152],[102,74,110,145],[108,154,154,173],[14,14,161,172],[6,6,154,153],[9,16,184,161],[14,12,170,172],[14,4,172,173],[9,4,160,156],[4,15,179,175],[11,14,170,160],[7,15,177,171],[16,6,164,152],[14,13,181,173],[7,9,165,162],[3,9,168,168],[3,2,160,161],[7,10,177,176],[6,10,174,150],[12,9,176,164],[11,11,154,153],[179,153,185,157],[6,1,149,156],[1,3,172,176],[11,12,151,168],[3,12,152,171],[8,10,168,163],[2,12,152,175],[15,8,168,161],[18,3,174,152],[7,6,164,149],[11,13,182,176],[4,10,152,166],[6,2,160,170],[1,13,152,176],[13,13,169,155],[3,9,165,176],[0,14,156,155],[13,3,169,174],[11,11,157,172],[18,1,179,175],[4,7,166,172],[13,12,169,158],[1,10,142,176],[4,8,167,165],[11,14,170,156],[3,3,171,160],[15,1,165,153],[8,14,183,155],[7,10,152,159],[1,3,170,150],[1,11,161,151],[14,11,162,168],[4,5,144,152],[13,12,170,152],[2,11,174,168],[12,3,173,152],[8,14,184,164],[9,7,185,160],[10,16,163,160],[12,2,167,165],[7,1,149,147],[14,14,161,158],[11,0,155,158],[13,7,164,162],[13,1,168,148],[13,2,170,142],[14,15,163,165],[8,15,160,168],[15,12,181,154],[6,4,173,172],[0,10,166,160],[3,0,174,163],[8,0,176,141],[2,0,149,170],[12,9,161,168],[17,5,169,149],[1,2,180,165],[6,1,173,176],[0,13,168,162],[15,5,157,173],[102,127,164,150],[6,1,159,165],[7,12,171,176],[65,138,98,145],[18,6,171,146],[16,3,183,164],[12,8,182,153],[160,176,167,176],[2,12,151,158],[64,89,101,145],[17,2,169,145],[8,0,170,163],[11,163,73,172],[10,5,168,151],[18,15,173,166],[6,11,173,164],[8,16,172,171],[9,16,171,163],[12,8,158,172],[4,12,169,173],[4,6,151,170],[18,14,184,154],[17,0,168,164],[9,7,171,153],[3,12,185,176],[126,28,158,135],[0,5,151,172],[0,11
,154,173],[12,5,163,156],[4,1,151,174],[6,14,173,168],[9,0,181,172],[101,158,170,167],[9,13,170,166],[2,5,181,170],[5,14,152,169],[4,3,168,166],[133,70,156,86],[1,1,164,149],[4,5,151,167],[8,5,156,153],[10,0,150,147],[0,4,171,148],[8,0,175,156],[7,3,168,150],[15,8,161,150],[181,138,184,141],[11,6,179,160],[2,5,170,163],[133,69,163,150],[12,12,174,174],[12,7,182,160],[9,5,157,148],[2,17,183,161],[2,0,152,154],[6,8,163,173],[3,6,157,158],[12,15,178,157],[17,4,180,155],[11,0,171,166],[1,5,158,173],[14,7,180,163],[13,8,172,163],[16,13,160,174],[7,6,184,146],[0,2,149,143],[12,7,173,151],[8,1,163,163],[0,1,184,165],[2,4,142,154],[1,4,144,148],[70,14,171,66],[15,4,179,172],[9,14,176,162],[0,6,141,148],[0,7,180,176],[7,5,165,158],[1,8,144,158],[11,13,156,169],[13,6,156,161],[9,15,155,168],[0,11,145,153],[18,12,175,166],[16,4,165,159],[2,13,154,167],[12,11,174,165],[6,2,147,170],[1,6,165,161],[5,2,165,160],[3,10,156,168],[12,17,158,172],[148,63,182,142],[11,1,159,162],[11,11,176,176],[70,163,140,169],[15,16,170,160],[0,2,150,160],[3,15,173,162],[0,9,169,151],[122,84,136,112],[9,15,163,159],[0,13,150,163],[2,3,143,176],[7,1,177,166],[3,12,179,161],[12,6,174,167],[18,13,183,167],[13,10,160,171],[4,2,146,155],[4,16,173,164],[14,8,176,153],[3,12,169,171],[0,15,176,168],[11,10,166,172],[10,9,184,157],[3,1,149,170],[7,11,176,165],[5,8,151,152],[4,15,170,160],[9,2,150,161],[0,8,150,176],[10,15,185,157],[14,9,165,152],[1,5,164,149],[13,2,180,165],[5,13,162,155],[11,14,154,158],[1,4,158,161],[12,2,152,161],[11,4,180,162],[18,0,182,157],[13,10,174,156],[12,4,169,153],[12,2,154,143],[4,3,158,156],[14,7,184,159],[13,7,155,148],[142,46,183,52],[98,82,158,100],[2,9,175,168],[3,7,176,152],[9,8,166,163],[132,107,179,144],[153,106,167,138],[4,2,184,165],[4,9,148,172],[14,7,184,157],[5,10,167,164],[4,9,185,166],[18,2,167,175],[140,171,181,172],[9,4,161,155],[39,122,102,126],[15,8,156,166],[8,2,182,166],[106,165,165,165],[150,105,155,149],[125,118,168,147],[17,11,170,172],[11,2,168,169],[12,2,172,166],[18,6,172,149],[9,16,159,158],[2,1,153,147],[16,0,183,163],[153,150,185,155],[5,16,169,157],[6,1,173,148],[51,89,113,91],[6,10,158,154],[4,14,150,169],[14,4,177,163],[8,10,154,175],[5,1,180,147],[18,71,77,95],[14,5,157,162],[10,15,162,159],[12,1,152,169],[8,12,171,154],[1,2,151,143],[11,0,184,163],[14,1,169,164],[1,17,152,168],[2,4,150,152],[16,0,165,157],[4,12,175,158],[1,7,153,168],[3,16,171,173],[14,17,161,174],[12,1,158,163],[10,6,163,151],[7,13,157,172],[12,0,158,157],[8,0,153,163],[4,0,144,164],[8,16,179,160],[11,14,153,162],[2,10,155,169],[3,8,151,153],[9,3,182,161],[95,68,135,144],[4,8,166,166],[12,4,180,167],[16,2,184,151],[13,1,179,159],[17,12,166,165],[16,5,185,158],[7,16,161,171],[10,12,183,155],[35,150,85,151],[2,3,164,144],[115,161,134,169],[0,9,157,152],[17,5,167,169],[5,16,175,161],[9,0,168,161],[9,2,175,152],[5,13,146,158],[8,6,183,168],[6,5,182,148],[16,16,170,164],[13,9,171,166],[4,8,162,175],[1,16,171,167],[6,1,185,160],[4,0,182,170],[8,12,172,159],[7,3,169,176],[16,13,170,159],[7,87,30,118],[14,16,176,168],[5,9,173,165],[13,17,178,173],[1,0,149,141],[31,2,95,161],[15,0,175,143],[13,12,166,174],[16,4,183,154],[3,14,169,175],[14,8,164,156],[8,0,165,167],[8,17,181,159],[18,11,178,163],[4,7,161,165],[18,17,181,160],[12,16,153,158],[3,13,185,154],[11,9,182,170],[6,7,179,167],[0,15,182,155],[1,5,145,151],[11,1,176,150],[18,16,182,163],[17,17,161,162],[1,3,141,163],[9,5,177,165],[13,1,154,161],[1,4,172,153],[10,1,183,142],[137,13,153,91],[4,15,160,176],[10,3,177,157],[11,0,175,165],[1,9,162,154],[10,4,168,1
54],[0,13,162,161],[16,10,164,164],[15,15,173,156],[18,14,185,172],[17,13,180,173],[8,15,160,170],[4,16,184,175],[5,5,166,167],[12,5,156,173],[13,4,154,162],[51,90,175,154],[5,3,145,161],[1,8,170,168],[13,7,172,171],[14,13,169,156],[4,9,156,162],[67,128,87,134],[10,3,154,152],[3,14,162,163],[10,3,174,153],[0,3,144,172],[4,17,163,168],[16,13,162,156],[3,8,149,155],[4,7,178,166],[14,17,167,169],[14,3,169,149],[10,15,163,167],[9,14,183,172],[1,9,177,160],[174,3,178,7],[2,4,155,172],[8,13,165,175],[9,9,183,158],[4,5,149,169],[2,10,165,163],[10,15,166,156],[18,12,169,163],[6,8,164,149],[6,5,157,168],[3,2,146,144],[3,7,150,160],[45,23,68,93],[3,8,160,174],[8,12,166,162],[11,13,178,153],[6,6,150,157],[7,3,148,147],[1,2,173,155],[17,12,160,160],[138,108,168,142],[10,13,181,170],[4,13,168,159],[2,5,174,170],[12,8,153,167],[9,15,152,172],[8,1,149,168],[11,16,171,164],[3,8,145,173],[2,6,176,152],[17,12,179,159],[2,17,182,172],[1,6,159,150],[9,11,150,161],[16,5,164,153],[2,9,182,150],[11,13,167,153],[10,9,172,156],[6,0,148,149],[1,3,174,153],[6,1,160,166],[11,6,169,168],[17,0,182,173],[2,8,184,152],[13,0,158,153],[16,6,165,161],[7,1,154,174],[1,5,159,151],[17,4,158,158],[2,17,147,161],[9,3,155,147],[8,1,155,148],[5,11,160,156],[1,3,141,143],[2,17,182,158],[2,5,164,147],[14,15,184,156],[3,0,149,160],[2,0,171,160],[10,5,169,169],[68,167,130,174],[16,11,179,175],[0,12,143,176],[69,43,91,128],[14,6,155,163],[9,7,164,174],[4,10,148,155],[11,13,162,159],[2,13,165,176],[12,14,159,156],[8,16,184,169],[6,9,168,161],[2,7,158,159],[14,12,158,164],[1,6,149,171],[3,16,164,161],[12,7,170,168],[12,2,170,171],[8,2,159,173],[16,18,19,149],[9,8,154,176],[57,152,58,157],[3,9,174,155],[142,164,185,175],[18,13,165,159],[11,0,151,161],[17,5,181,152],[3,10,160,165],[7,4,154,172],[2,11,179,152],[13,3,183,166],[7,5,154,146],[6,2,152,160],[6,2,176,156],[14,2,180,166],[10,17,158,171],[18,5,167,158],[1,11,152,166],[3,13,169,158],[15,9,174,167],[15,8,157,171],[3,14,147,157],[13,14,168,166],[9,11,175,168],[5,16,173,164],[7,13,148,175],[20,103,94,148],[10,42,159,153],[5,0,183,144],[16,16,161,157],[9,13,182,172],[2,0,152,157],[2,1,154,143],[13,7,185,154],[6,1,156,158],[1,6,164,146],[17,8,172,165],[3,5,176,161],[1,11,142,175],[6,6,185,148],[3,3,165,144],[15,9,161,152],[17,10,157,152],[4,7,162,147],[144,68,150,138],[9,7,181,164],[7,7,175,162],[11,2,167,176],[14,16,158,165],[1,14,180,168],[4,3,146,166],[6,10,172,152],[10,2,156,176],[0,17,174,163],[9,6,154,168],[14,11,175,176],[1,10,168,176],[18,5,164,161],[16,14,182,165],[12,1,172,175],[0,5,152,145],[1,9,165,174],[18,9,161,169],[3,17,149,162],[4,0,155,141],[168,45,174,142],[8,8,165,171],[2,3,147,155],[2,13,182,170],[7,8,150,153],[18,4,175,149],[51,38,60,158],[2,12,148,175],[8,3,152,162],[1,12,184,159],[14,7,172,168],[9,0,185,176],[10,8,172,154],[17,3,163,144],[3,3,147,153],[2,9,173,174],[4,6,175,154],[7,7,158,173],[11,4,182,155],[10,14,156,166],[14,7,168,150],[141,103,148,109],[4,10,167,164],[54,109,148,168],[106,102,129,167],[2,4,147,161],[15,15,169,166],[11,2,152,159],[10,12,153,175],[8,10,160,165],[9,0,177,147],[3,16,143,157],[17,15,182,169],[15,5,184,148],[3,6,152,147],[32,43,113,108],[11,3,157,158],[13,7,172,174],[17,5,173,171],[3,1,145,164],[11,0,159,164],[3,17,156,175],[12,6,164,152],[6,22,27,53],[7,1,154,160],[9,9,167,157],[8,13,171,171],[14,9,166,151],[4,15,154,167],[15,12,161,154],[2,2,179,158],[7,4,164,164],[16,4,184,154],[4,85,172,104],[0,44,164,69],[0,5,176,163],[18,16,176,157],[12,5,161,162],[0,11,159,165],[15,4,170,148],[6,6,168,170],[9,17,181,163],[8,12,156,168],[16,9,18
4,175],[6,12,176,157],[6,9,148,174],[52,74,69,157],[1,7,172,156],[15,6,173,166],[17,8,164,158],[16,0,158,155],[9,13,161,172],[14,45,130,55],[10,8,172,161],[16,7,168,152],[12,4,183,175],[10,13,157,167],[1,6,161,172],[7,15,173,169],[17,7,179,155],[1,8,184,175],[9,1,172,162],[17,1,158,157],[10,3,183,165],[14,11,164,161],[9,5,160,159],[13,7,162,159],[8,16,169,176],[16,10,169,166],[5,9,166,164],[2,0,158,169],[2,11,156,160],[157,168,177,169],[9,9,167,160],[0,3,150,144],[9,11,149,152],[6,5,178,159],[7,5,185,154],[1,2,174,144],[15,13,171,176],[5,1,160,166],[6,15,182,174],[6,0,176,143],[13,9,166,161],[7,4,157,164],[10,12,157,154],[10,9,172,171],[0,15,153,167],[3,16,184,176],[3,13,147,158],[15,9,168,164],[11,0,167,161],[8,0,153,168],[11,7,155,161],[12,13,176,155],[3,11,174,158],[161,147,174,160],[2,6,175,153],[9,6,169,162],[1,6,144,171],[13,6,158,160],[14,10,180,162],[14,7,154,149],[5,4,183,145],[1,0,175,143],[15,14,178,176],[13,3,159,152],[4,14,168,170],[184,4,184,41],[8,11,162,169],[10,4,161,159],[6,6,179,155],[18,3,167,165],[3,9,164,149],[8,1,167,163],[18,2,183,149],[6,8,157,155],[14,5,164,151],[5,7,153,158],[8,2,165,164],[14,2,182,174],[14,13,182,170],[6,4,158,165],[16,12,172,174],[91,13,128,95],[8,10,184,172],[8,17,184,175],[18,6,171,176],[1,17,151,168],[8,6,174,171],[9,3,165,166],[1,9,149,174],[116,52,141,112],[4,5,170,164],[5,0,159,144],[7,17,184,167],[0,11,154,152],[9,10,166,176],[0,1,141,163],[3,3,156,155],[0,13,153,161],[10,0,163,163],[2,13,151,155],[2,7,166,153],[16,4,166,165],[9,4,153,167],[13,6,173,170],[5,7,153,167],[10,14,166,157],[9,5,149,171],[4,2,171,158],[8,8,167,151],[16,16,184,171],[5,0,172,158],[1,6,154,159],[89,25,115,100],[17,8,169,151],[8,11,184,169],[10,15,173,165],[181,124,184,145],[13,3,169,147],[126,158,155,168],[3,9,166,156],[4,10,185,170],[5,2,153,144],[19,81,103,106],[6,2,164,155],[8,1,175,144],[0,1,155,150],[68,53,145,138],[14,1,162,144],[5,9,158,172],[2,16,185,174],[7,5,160,175],[17,1,157,161],[5,4,152,166],[2,10,177,151],[5,12,184,155],[1,11,143,161],[51,8,184,118],[4,0,173,155],[8,11,174,167],[9,5,183,166],[4,5,179,163],[2,1,178,144],[15,17,161,174],[7,4,158,150],[7,9,156,166],[18,17,178,162],[3,13,158,169],[112,40,161,165],[10,3,155,153],[7,14,173,155],[1,9,172,172],[9,6,178,151],[6,10,176,169],[10,3,174,161],[15,6,168,167],[4,11,145,164],[8,12,163,169],[9,10,185,157],[7,5,159,176],[0,4,174,151],[15,0,184,156],[9,6,175,166],[8,13,158,158],[11,5,168,145],[5,0,145,169],[16,8,168,159],[56,12,116,56],[12,9,160,168],[12,1,152,166],[10,10,179,173],[7,16,167,161],[5,13,147,176],[13,13,174,154],[3,3,158,158],[9,12,168,176],[8,16,176,158],[0,0,152,175],[12,3,181,149],[1,8,156,151],[2,1,176,158],[4,0,149,141],[6,3,166,162],[9,8,180,162],[12,8,172,171],[127,128,156,160],[11,6,177,159],[3,8,157,153],[12,13,174,165],[7,15,162,176],[94,40,173,66],[1,9,141,162],[94,37,99,94],[2,5,150,153],[12,6,160,175],[1,14,172,156],[11,1,165,174],[11,13,181,155],[1,4,185,151],[79,78,89,165],[10,8,171,164],[0,2,183,165],[12,9,170,149],[46,80,88,137],[17,0,172,153],[16,11,172,160],[6,3,171,167],[15,1,162,170],[16,0,170,149],[10,12,177,155],[0,2,166,173],[7,0,157,172],[4,14,163,164],[17,12,184,176],[18,12,180,173],[0,16,156,176],[5,8,156,155],[5,14,180,168],[18,9,185,150],[15,3,179,164],[1,10,165,176],[9,9,155,155],[81,24,116,127],[14,11,156,164],[4,10,148,152],[0,3,146,158],[13,15,164,170],[17,1,182,147],[7,14,179,155],[13,15,164,163],[13,6,153,150],[0,13,163,163],[0,17,178,159],[9,0,171,166],[14,1,163,146],[6,14,153,159],[1,15,179,169],[17,115,30,140],[16,13,159,164],[5,4,171,145],[7,1,177,144
],[8,10,157,169],[19,124,52,153],[7,9,173,176],[6,0,150,151],[5,4,169,165],[6,7,175,170],[2,0,182,174],[16,6,185,149],[15,7,168,173],[7,8,166,148],[8,14,160,154],[10,13,160,165],[16,1,174,173],[0,0,185,163],[6,4,166,145],[1,16,164,159],[9,17,172,171],[16,10,178,171],[63,37,78,120],[0,1,175,144],[1,0,180,173],[144,156,183,174],[4,0,179,149],[18,123,131,155],[11,10,162,156],[9,9,163,155],[14,6,166,149],[5,4,184,175],[13,12,182,170],[4,11,161,166],[66,121,67,121],[0,15,176,169],[12,9,172,168],[164,162,168,173],[8,9,168,155],[6,5,180,172],[9,0,150,164],[3,7,145,161],[2,15,154,155],[0,6,151,175],[18,6,159,162],[21,47,101,123],[17,8,173,153],[18,2,162,157],[16,14,159,159],[3,14,151,154],[18,1,172,162],[51,165,117,165],[18,17,175,165],[12,9,178,155],[8,12,159,166],[8,8,150,172],[16,13,156,171],[9,17,159,166],[34,4,181,42],[2,0,182,143],[3,14,177,170],[18,7,167,165],[6,8,150,155],[5,14,160,164],[2,14,164,155],[16,5,160,158],[11,9,173,156],[14,5,181,168],[17,0,180,141],[163,121,176,154],[9,0,180,142],[12,5,166,169],[9,13,181,153],[14,10,165,174],[2,1,180,162],[3,11,153,173],[17,6,171,147],[6,5,157,147],[8,14,151,168],[9,11,181,172],[15,0,176,148],[17,6,163,161],[10,14,163,173],[1,1,172,145],[16,14,163,163],[80,16,88,97],[1,9,179,167],[7,3,166,150],[90,9,167,124],[3,13,147,169],[90,16,146,114],[4,4,153,162],[16,15,167,160],[2,16,148,157],[3,17,152,171],[12,2,152,158],[14,4,166,156],[12,5,172,168],[3,0,145,176],[15,1,184,164],[8,10,166,176],[1,11,183,162],[3,6,161,161],[0,7,167,167],[0,11,159,158],[18,0,177,167],[23,75,92,105],[9,14,182,154],[4,0,156,147],[9,4,150,167],[6,10,150,160],[15,4,157,157],[17,2,158,159],[18,5,158,166],[1,3,146,150],[4,5,159,152],[18,8,175,175],[7,17,153,159],[6,6,159,153],[4,11,155,155],[6,11,160,176],[1,9,176,171],[7,3,158,144],[16,6,156,153],[16,3,162,172],[15,11,185,171],[6,13,150,158],[8,7,154,168],[8,1,156,153],[2,1,146,156],[10,16,151,156],[10,1,160,146],[14,4,158,173],[16,3,175,161],[5,3,148,159],[9,3,167,143],[1,5,145,163],[18,11,161,175],[1,17,141,166],[9,5,150,153],[13,13,161,160],[1,7,179,157],[9,17,149,171],[12,4,153,163],[10,2,164,152],[4,14,169,155],[8,9,183,166],[0,12,171,173],[10,7,162,157],[177,95,184,137],[5,1,171,164],[18,1,170,154],[15,15,172,175],[6,11,162,172],[7,13,173,157],[161,40,162,82],[6,13,184,159],[12,15,180,164],[4,13,146,169],[3,1,163,149],[0,0,152,174],[15,1,164,152],[7,6,181,154],[12,6,170,150],[4,17,174,157],[4,3,170,162],[11,10,178,155],[2,0,165,175],[3,9,159,166],[8,5,185,166],[5,11,152,158],[14,5,185,159],[10,6,170,148],[6,4,163,156],[14,6,178,169],[12,15,157,162],[3,13,182,164],[6,3,156,155],[17,4,167,147],[2,15,151,172],[10,15,159,172],[5,13,177,166],[12,3,181,159],[174,47,183,76],[3,12,183,175],[7,6,174,174],[18,12,161,161],[10,1,153,158],[3,12,165,176],[15,8,162,155],[13,5,156,167],[4,6,180,175],[3,12,178,158],[3,4,156,163],[1,17,154,165],[1,13,159,159],[2,3,159,171],[29,71,175,136],[6,7,162,173],[63,173,162,175],[16,8,174,163],[7,6,185,176],[1,14,161,157],[141,146,151,157],[8,1,153,150],[6,1,182,163],[13,5,177,176],[13,7,158,165],[1,11,163,153],[14,10,169,158],[4,14,169,155],[14,17,178,162],[25,140,177,171],[10,2,177,168],[0,13,160,173],[3,1,152,152],[3,9,157,163],[5,6,183,168],[14,13,168,168],[54,148,109,175],[3,16,183,172],[7,15,182,156],[15,17,166,174],[144,145,145,161],[18,2,171,150],[9,15,169,166],[3,7,149,156],[4,1,176,161],[14,2,162,160],[11,16,164,162],[9,8,185,157],[5,8,149,171],[12,10,155,172],[13,9,156,162],[161,115,185,167],[8,10,149,164],[10,4,181,167],[15,15,176,171],[17,9,161,154],[17,6,161,159],[12,2,175,154],[174,11
4,185,119],[5,1,161,151],[108,46,108,145],[13,4,159,149],[10,7,150,161],[10,4,166,151],[8,6,182,170],[11,1,181,145],[0,15,152,165],[9,2,184,156],[10,16,176,164],[4,3,178,169],[17,8,165,163],[3,8,172,162],[13,16,183,176],[129,107,151,117],[4,13,168,173],[11,5,162,176],[12,11,153,163],[135,44,158,89],[16,2,174,161],[6,8,163,158],[11,17,165,164],[7,1,172,162],[14,0,174,140],[9,7,180,171],[4,11,157,168],[159,110,161,134],[16,17,180,157],[15,6,182,148],[16,8,178,155],[2,0,160,145],[3,5,181,175],[2,7,172,169],[7,7,150,171],[9,3,171,172],[4,10,170,172],[15,6,166,146],[15,0,178,165],[5,8,173,176],[3,4,165,165],[6,5,168,174],[56,137,112,139],[4,10,165,163],[18,10,162,174],[11,14,159,166],[0,3,157,156],[14,6,182,174],[14,14,164,156],[1,12,180,168],[12,16,170,175],[8,12,173,172],[18,9,180,150],[9,10,181,153],[9,0,156,175],[12,10,156,162],[9,17,173,167],[1,3,148,148],[10,1,176,176],[10,7,173,159],[1,11,149,169],[5,14,164,167],[3,3,157,171],[1,8,182,157],[9,10,184,176],[4,12,173,170],[15,7,171,167],[10,5,176,146],[3,2,180,175],[10,12,150,166],[15,16,160,176],[98,29,104,148],[3,2,167,160]]
],
[None, -130, -43, -124, 399, -483, -618, 87, -252, 280, -240, -241, -28, -494, -573, -233, 147, -137, 157, 187, 431, 162, -356, -485, -202, -342, -336, -489, -221, 358, 258, -276, -333, -396, -512, -297, -66, 179, -226, -346, -433, -305, 303, 174, -120, -79, -330, 32, 31, -24, 8, 471, -230, -546, 106, -304, -69, 368, 39, -15, 86, -615, 615, 41, -76, -309, -437, -322, -351, -24, -59, -194, -248, 242, -203, 354, -43, -675, 495, 80, -426, 562, -289, -22, -173, -1, -553, -251, -13, 470, 132, 11, 432, -108, 231, 102, 582, -354, 41, 138, 49, 228, -801, -122, -148, -5, 568, -365, -270, -507, -249, -125, 513, 205, 151, -452, 215, 14, 478, 55, -272, 28, -301, -103, -505, -274, 506, 339, -21, -424, 97, 108, -348, 466, -30, 226, 374, -455, -337, 377, 206, -25, 84, -31, -693, -21, -21, 355, 138, 18, -516, 30, 463, 397, 171, -165, -345, 320, -435, -81, -132, 268, -115, -298, -91, -92, -301, 57, 137, -384, 143, 179, -33, 371, 6, 305, -431, 202, -672, 10, 290, -447, -699, 162, 247, 153, -77, 64, -338, -252, 48, -269, -145, 31, -474, 272, -4, -3, 222, 168, -382, 32, 270, 55, 95, 31, -60, -96, 226, -69, 429, -518, -533, -464, -326, 333, 365, -470, 432, -2, 433, 389, -194, -673, -256, 140, -41, 296, -197, -447, -343, 327, 264, 130, -571, 120, 213, -400, -288, -76, 50, -204, 203, 120, 444, -232, 5, -265, -159, -542, 379, 170, -481, 113, -211, -642, -209, 47, 136, 75, 199, -113, 39, 91, -146, -182, -507, -437, -274, -428, -279, -139, -426, 356, 265, 138, -3, -371, 469, -260, -370, 153, -338, 128, 337, -320, -181, -521, -225, -217, -334, -172, 20, 160, -334, 154, 327, 249, 606, 14, 18, 23, 128, -110, 86, -250, -255, -385, -81, 17, -143, -559, -452, 4, 181, 49, -124, -861, 3, 273, -514, -27, 531, -93, -369, -237, 258, 454, -127, 78, 40, 81, -578, 188, -733, 65, 399, 273, -468, -96, 209, 7, -100, -469, 297, -131, -306, 40, -342, -34, -309, 131, -55, -708, 463, 18, 186, 385, 153, -560, -85, -764, -158, -88, -374, 496, -5, 62, -230, -31, -304, 211, -249, 391, 180, -275, 67, -109, -250, -412, 192, 392, 218, 495, 258, -114, 146, -53, -397, -34, -251, -157, 426, 30, 18, 322, 23, 295, -114, -154, 195, -230, 130, -583, -65, 306, -465, -323, -275, -347, -168, 68, 206, 281, 531, -105, -38, -482, -703, -317, -298, 78, -76, -170, -603, -496, 90, 127, -418, 228, 363, -534, 487, 553, 110, -62, -343, 337, 321, -484, -264, 2, -304, -97, -81, -441, -4, -690, -724, -422, 215, -6, -70, -407, 733, -494, 359, 312, -26, -87, 207, -744, -38, -461, -321, 257, 314, -475, 19, 122, -44, -586, -463, -544, 236, 557, 9, -522, -166, 194, 364, 245, -280, 424, -172, -398, -242, 277, -326, -225, -219, 65, 205, -389, -156, -442, 554, 205, -396, -131, 355, 144, -324, 439, -93, -2, 350, 2, -180, 62, -544, 14, -325, -507, 280, 199, 19, 117, 393, 241, -64, 423, -387, 346, -97, 114, 91, 154, 3, -67, 26, 393, -442, 53, 211, 225, -292, -362, -344, -352, -97, -271, 481, -185, -590, 42, 35, 305, 274, 481, -237, 308, -171, -67, -464, 82, -450, -401, 19, 173, 196, -120, 211, 93, 100, -276, 2, -791, -558, 175, -307, 164, 566, 594, 235, -68, -195, -191, 258, -98, -184, 119, 77, 417, 485, -354, -14, 34, 59, -83, -355, 238, -62, -31, -382, -51, -344, -580, 328, -248, -217, -459, -149, -515, -407, -245, -52, -288, 333, -253, -579, 252, 220, -247, 424, 114, 472, -573, 457, -37, -2, 116, -34, 55, 358, 53, -217, -242, -729, -258, -180, 461, -383, -211, 389, -344, 282, -113, -303, 488, 197, 262, -229, 422, 99, -355, 184, -346, -116, -351, -510, -215, 268, 388, 138, 211, -24, 605, -162, 13, 536, -305, -237, -335, -200, -893, 660, -34, 188, -562, -405, -163, 60, 
-735, 119, 174, 302, -420, -58, 0, -68, 398, 138, 204, -440, 9, -364, -67, 69, 100, 420, 58, 64, -473, 178, -337, -71, 33, 68, 57, -706, -506, -94, 184, -113, -94, 5, -117, -66, 152, -51, -391, 46, -22, -607, -173, -146, 441, 43, -355, 362, 249, -316, 25, -430, -347, -205, 16, -19, -780, 250, 165, -751, -32, -177, 110, -280, 490, -102, 326, 326, 454, 25, -218, 257, -455, -54, -479, -325, -396, 125, 126, 123, 231, -56, -231, -311, 186, 431, -116, -297, -406, -360, -504, 355, -280, -109, 11, 320, -409, 484, 231, -29, 113, 1, 201, -238, -232, -249, -344, 476, 518, -159, -144, 150, 219, 76, -268, 68, -465, 207, 515, -102, -45, 622, -66, -72, -11, -451, 451, -16, 298, -43, -154, -56, -1037, -573, -131, 441, -392, -399, 260, -360, 211, 259, -340, 23, 73, -88, 150, -224, -104, 529, 177, 167, -155, -105, -370, -417, -557, -61, 194, -441, 464, 498, 120, 451, 560, -202, 464, 367, 231, -88, -221, -274, -194, -342, -22, 115, 95, 236, 333, -360, 78, -236, 414, 208, -576, -72, -266, -365, -21, 126, 394, -207, -178, -122, 470, 94, -620, -445, -386, 168, 6, 289, -719, 62, -451, -181, 524, -59, 66, -123, -454, 222, 79, -87, -155, -508, 172, 818, -739, 101, -616, -169, 50, 435, 333, -191, -245, 179, -311, 78, 87, -18, 514, -309, -561, 618, 181, -59, -50, -73, 103, -368, -54, -294, 440, -331, 141, -63, 196, -251, -120, -164, -89, 661, 88, 385, -507, 270, -22, -9, -655, 171, 424, -71, -34, -697, 94, -369, -270, 96, 213, 155, 18, 258, -277, -47, -400, -199, -454, -694, -420, -177, -108, 75, 4, -102, -121, 86, -154, -495, 479, 202, 222, 98, -268, -263, 244, 50, 49, 321, -494, 48, 43, 32, -49, 172, 221, -143, 174, -217, 488, -10, 76, -330, -387, 399, 498, 402, 288, 522, 302, 398, -521, -527, 145, 454, -365, -27, 233, 339, 531, -450, 83, 90, -53, 137, 396, 346, 62, 91, 34, 472, -533, -170, 503, -112, -16, -235, -421, -469, 354, -263, 78, -395, -57, -197, 594, -465, 221, -648, 205, -65, -397, -721, 77, -793, 24, -34, -380, -648, 339, -601, 426, 114, 448, -122, -409, 284, 258, -56, 15, -250, -169, -198, 429, -235, 14, 202, 56, -32, 98, -38, -517, -172, -7, 462, -493, -369, 22, -108, 21, -545, 282, -161, -316, 690, 106, -332, -353, -201, -259, -327, -270, -456, -117, -138, 334, -148, -301, -16, -496, -39, 180, 350, 322, -242, 480, 569, -669, -45, -268, -525, -380, 352, 231, -510, 3, -290, -262, 265, 402, 92, 572, -402, 662, -48, -369, 26, 89, -122, -392, -672, -202, 321, 169, -450, 300, -12, 131, 325, -514, -20, 414, -400, 205, -284, 326, 252, -677, 96, -382, -658, 380, -24, 43, -467, 201, 206, 76, -189, -4, 85, 382, -108, 41, -524, 3, -562, 105, -416, 328, -428, 156, -61, 2, -110, 42, -115, 486, 24, -314, 65, -10, -488, 386, -27, -474, -243, 463, 61, 242, 253, -128, -764, 413, -275, -702, 303, -465, -215, -521, -807, 181, -350, -77, 267, 190, 9, -168, -48, -309, 214, 490, 238, -292, 210, -358, 284, 64, -415, 97, -117, -330, 335, -42, 251, -170, -440, 365, -236, -193, -223, -36, 521, 239, -285, -559, 234, 140, -432, -151, 70, -22, -299, -306, -158, -409, -365, 296, -72, -417, -427, -58, -153, 89, -74, -601, 72, 35, 323, -215, 243, 122, 132, 470, 380, -489, -162, -685, -294, -406, 117, 154, -424, 72, 428, 303, 149, -28, -396, -107, 226, -195, -448, 381, 532, -178, -254, -367, -270, 6, 309, -160, 246, 267, 0, 181, -29, -256, 69, 101, -116, -178, -382, 215, 93, 72, 150, -516, 481, -372, 592, 193, 217, -150, -76, -569, -270, 459, 121, 90, 304, 94, 82, -382, -105, -333, -327, -494, -269, -262, -142, 244, 123, -401, 155, -204, -38, -705, -509, -27, -588, -205, 341, -109, 152, -517, -145, 112, 73, 289, 331, 149, 19, -49, 
-543, 75, -206, -442, -186, 18, 246, 209, 296, -11, 309, 603, 112, 205, 6, -181, -406, -36, -32, 15, -34, -181, -238, -52, 136, -236, -128, 124, -530, -181, -220, 312, 452, -112, 18, 162, -449, 528, -194, -22, -468, -51, -155, 68, -56, 290, 624, -129, -151, 55, 417, -28, -265, 155, 171, 488, -322, -330, -23, 290, -105, -169, -30, -368, -178, 239, 190, 630, 269, -41, -224, -708, -145, -519, -220, 553, -632, 17, -47, -451, 367, -421, -409, 602, -494, 455, 16, -234, 192, -103, -37, -43, -296, -458, -409, -621, -44, -242, 138, 397, 229, 1, -494, -287, 93, -151, -13, 356, 196, 100, -331, -279, 69, 282, -99, 41, -482, 25, -152, -269, 393, 474, -220, 247, -567, 189, -592, -302, -134, 297, -45, 534, -715, 379, -318, -475, -8, 313, 111, 439, -130, 78, -185, -297, -235, 194, 254, -324, -419, -300, -162, 175, 93, 376, -151, 231, -349, 219, -306, -431, -417, -320, 361, -338, -202, -171, -80, -80, -512, -128, -158, 221, 453, 246, 64, 41, -87, 68, -432, -222, -430, 26, -96, 135, -77, -22, 266, -158, -197, 356, -164, 81, -441, -154, -515, -392, 309, -479, -367, -190, -728, -405, -192, 360, -245, 97, -429, 265, 1, -196, -62, -34, -619, 379, -370, -4, -428, 417, -110, 63, 17, 382, 162, -214, 128, -125, -495, -325, -89, -99, -112, -303, -238, -177, -130, -665, -178, -265, 35, -122, 306, -21, 91, 440, 243, 457, 7, 206, -18, -451, 264, -387, 84, -213, -267, -157, -74, -172, -95, 322, -17, -454, -192, 158, -183, -524, -250, -484, -64, -724, -394, -592, -486, -480, -221, -296, 62, -314, -378, -338, -174, 85, 251, 191, -264, -293, -65, 221, 101, -322, 386, 136, -365, 378, 365, 515, -553, -224, 12, 355, -140, -46, -60, 122, 103, 471, -451, -89, -22, 200, -209, 462, -337, 450, 50, -231, 341, -576, -119, 159, -285, -193, -589, -224, -325, -86, 407, -164, -28, 81, -237, 147, 8, -201, -222, 285, 20, 140, -781, -434, -25, -357, 50, -5, -43, 349, 165, -110, -855, -93, -265, 77, -201, 61, 250, -16, -86, 213, -301, 473, -71, 446, -410, 137, -27, -382, 205, 379, 331, 517, 91, 239, -297, -31, -277, -272, -707, -293, -68, 486, -170, -261, -90, 340, -447, 218, -343, -38, 518, -492, -88, 54, -198, -210, 485, -503, 263, 128, 687, 411, 617, -20, 20, -403, 274, 190, 159, -243, -714, 594, 362, 76, -41, -248, -269, -166, -472, 278, 155, 554, -629, -335, 7, 83, -675, 446, 295, -565, -172, 132, 503, -301, -37, 264, -226, -44, 124, -155, -182, -696, -386, -266, -382, 554, 354, -675, -45, -21, -539, -166, 85, -467, 195, 130, -67, 7, 33, 18, 235, 58, -365, 448, 43, 572, -439, 141, -47, -379, -164, -241, -157, 361, -522, -119, 306, 265, 135, 601, 71, -112, 26, 265, 70, -351, -448, -186, -193, -139, -225, -118, -183, -74, 599, -281, -289, -416, 413, 71, -264, -20, -282, -324, 126, -182, -511, 361, -301, -371, 86, -265, -105, -311, -128, 436, -621, -235, -155, -36, -354, -369, -270, -186, -166, -95, -270, 110, -481, -130, 445, 359, -119, -286, -44, 26, 20, 301, 453, 623, -576, 363, -318, -360, -166, 47, -356, -364, 35, -204, -288, -213, 32, -344, -51, -44, -183, -188, 34, 294, 255, -442, 114, 206, -59, 257, 202, -106, -276, 481, 9, 38, 436, 192, 362, -766, 481, -464, 102, -287, 490, -453, 161, -313, -177, 231, -389, -63, -305, -247, -44, -422, -112, 56, 277, -195, -244, 249, -368, 188, 139, 667, 206, 307, -31, -9, -101, -60, -385, 88, 130, 241, 329, 33, -393, -133, -302, 16, -180, -260, -512, -396, -778, 375, -331, 223, -381, -136, -150, 532, 118, 586, 495, 688, -252, -580, -497, -498, 58, -554, 5, 396, -213, 72, 143, -624, -400, -362, -144, 234, 333, -22, 125, -364, 216, 105, -153, -68, -480, -24, 347, -76, -428, 238, 462, -329, 422, 
433, 500, -235, -266, 19, -240, -626, -23, -96, -358, -459, -569, 79, 553, 85, -220, -3, -77, 344, 211, -53, 463, 85, 509, 102, -453, 26, -489, -270, -54, -392, 7, -24, -116, 98, -285, -228, 336, 282, -160, -7, -482, 279, -414, -432, -343, 105, -147, 447, -351, -56, -392, -535, 94, 115, -373, 120, -203, 418, -99, 340, 5, -177, -238, 407, 170, 361, 390, -10, 219, -511, -290, -336, -294, -244, 309, 194, 351, 288, -358, -157, 107, -96, 628, 441, 59, 45, -102, 205, 130, -145, -506, -380, 190, -558, -215, -110, 104, 293, -232, -252, -112, -28, -113, -579, 182, -759, 119, 514, -25, -58, 449, 144, -157, -384, -88, -366, 330, 214, -250, -149, 451, 431, -319, -62, 327, -504, 4, 233, 249, -278, -393, -232, -116, 204, -600, 60, -97, 127, 10, -259, 754, 94, -550, 187, -190, 125, -356, 170, -49, 534, -315, 150, -158, 279, 417, -28, -196, -147, 194, 97, -169, -238, -88, -70, 67, -228, -253, 139, -641, 113, -200, -49, 46, 31, -230, 14, 483, -64, 417, -217, 31, -753, -417, -373, -176, 93, 219, -386, -357, -594, 249, 359, 77, 167, 234, -336, -684, 22, -851, 318, -72, 143, 82, 248, -225, 158, 321, -469, -259, 186, -381, -181, 40, -288, -288, 217, -285, -290, 456, -46, 218, -377, -164, -158, 131, -85, 143, -38, -8, -388, 20, 50, 80, 532, -38, 57, 183, -493, -265, 124, -270, -474, 79, -142, 168, -107, 21, 2, -108, -137, -312, -202, 513, 150, -323, -317, 304, -237, -57, -617, -301, -37, -3, -572, -408, -371, 487, -100, -312, 306, 355, 246, -388, -225, -99, -221, -548, 98, 109, -232, 242, -414, 242, -568, 142, 551, -544, 185, -150, 130, 434, -320, 82, -209, -123, -685, 89, -863, 93, -280, -35, 122, 157, 95, 386, -35, 262, -94, -346, 379, -732, -364, -316, 368, 62, 91, -187, 283, -596, 138, -565, 110, 327, 422, -9, 12, -222, 510, -163, 373, -103, -572, -218, 287, -653, 150, 263, -447, -615, 129, 213, -350, 623, 286, -737, 145, -309, 453, 369, 59, -82, -290, -580, -22, -333, 109, -39, -218, 152, 160, 523, -820, -54, -760, -413, -275, 93, -717, 129, -48, -35, 461, 595, -364, -7, 0, -115, 497, -120, -520, 357, -120, 119, 340, -390, 334, 124, 19, -705, -591, 239, 378, -77, 482, 257, 532, -230, -265, 347, -261, -618, -513, 367, -300, -587, -50, 184, 38, -416, -68, 147, 220, 204, 427, 402, 143, 220, -456, -683, -450, -686, 37, 55, -164, 10, -273, -254, -39, 34, -257, 219, 578, -229, 143, -272, -481, -148, 445, -298, -68, -551, 222, -135, 244, -513, -148, 187, 189, 59, 139, -200, 869, -109, 195, 313, -646, 49, -523, -361, 363, -225, 112, -52, -62, -84, 146, 1, 92, -508, -99, -84, -467, 67, -173, 38, 149, 31, -68, -320, -603, -519, 224, 301, 53, -134, -9, 148, -550, -280, -411, -428, 223, -522, 98, -289, -406, -267, 131, -225, 206, 188, 501, 93, 88, 207, 213, 428, 394, -457, 471, 414, 493, -729, -346, 138, -411, -608, 179, -215, -259, -573, -17, -259, 117, 395, -349, -325, -152, -62, -496, 382, -446, -137, 137, 271, -114, 149, 266, -232, 183, 372, -529, 182, 116, 188, 274, 72, -36, -131, -240, 88, -261, 497, 89, 357, -15, 24, 256, -103, 246, 351, -333, -238, -79, 100, 195, -42, 345, 503, -295, 527, 388, -391, -312, -237, 29, 567, 15, -231, 95, -113, 231, 264, 170, -389, -553, 114, 232, -422, -133, 107, -454, -322, -319, -117, -76, -109, 460, -260, 7, 381, -307, -108, -152, -358, 7, -470, -137, -465, 360, -72, -285, -474, -225, -196, 46, -148, -581, -38, 96, -181, 345, -501, -19, -730, 173, -192, -127, 214, 229, 140, 22, 455, 143, -253, -273, -332, -468, -4, -340, -182, -97, 172, -204, 397, -289, -160, -646, -14, 179, -126, -44, 73, -194, -499, -308, 235, -216, -10, -184, 268, 182, 327, -9, -39, -480, 138, -308, 113, 
-48, -172, 141, -455, -15, 179, -7, -155, -11, 268, 241, -549, 46, 208, -222, -170, 93, 96, 69, -476, -135, -549, -462, 265, 151, 231, 413, -425, 408, -330, 250, 8, -161, -37, 162, 433, -63, 218, 381, -377, 420, -37, 499, 220, 263, 129, 347, -478, 270, -442, -11, -160, -245, 71, 63, -57, 6, 304, 502, -230, -668, 110, -259, -166, -427, -565, 161, -46, -448, 406, -340, 168, 150, 538, -299, -97, 170, -96, 40, -126, -86, -431, 333, -276, -576, -16, 359, -467, -519, 74, 309, 209, -164, 30, -98, 436, 558, -131, 450, 107, -415, -676, 19, -560, 183, -256, -298, -324, -139, 437, -3, -446, -132, -513, -165, 19, -179, 58, 101, 377, -118, -182, -125, 16, 341, -591, 179, 532, -609, -379, -322, 323, -308, -830, 117, -80, -322, 624, -111, -364, -400, 416, 288, -151, -506, 284, -309, -237, -240, 31, 128, 107, 40, -182, 43, 56, -153, -589, -327, -311, -94, 73, -17, -623, -274, -323, 128, -277, -264, -113, 185, 129, 322, -17, 176, -488, 695, 269, -306, 417, 417, 73, -543, 75, -392, -473, 308, -43, 538, 322, -430, 98, 569, 144, -155, 166, 226, 77, -89, -342, 165, -98, 398, 226, -431, 268, -4, 90, -41, -502, -185, -95, -294, 140, -203, -15, -153, 457, -384, 985, -677, -118, 399, -244, 102, 237, -592, 96, 121, -585, 9, -97, 389, -233, 475, -109, 178, 8, -449, -210, -397, 240, -552, 31, 73, -208, -268, -299, 19, -187, 173, 390, 490, 20, -222, 462, -251, -219, -12, 434, 251, -295, -535, 154, -189, -329, -596, -205, -476, -577, -23, 232, -168, -339, -257, -40, -51, -196, -118, 72, 227, 50, 433, 30, -95, 173, 19, -118, 364, 173, -568, 312, 162, 157, 498, 328, -413, -450, 522, -878, -215, -408, 76, -120, 399, 499, 141, 112, -55, -405, -493, -40, 74, -2, -76, -219, -633, -375, -114, -244, -553, 33, 105, -224, -374, -639, 288, 71, -517, -42, 50, 60, -320, -642, 237, 22, 297, 413, -249, -49, -284, 119, 218, 10, 588, 32, -481, 147, 179, 166, 225, 207, 206, 133, 262, -349, -264, 298, -145, -91, -200, 143, 17, 6, -246, 426, 598, 206, 557, -103, -459, -359, 209, -278, -179, -110, 77, 273, -80, -33, 170, -155, -374, -91, 282, 166, -332, 423, 56, -185, 437, -24, 25, -49, -113, -184, -260, 5, -490, -299, -2, -193, 159, 329, -90, 105, 300, -237, -2, 386, 8, -334, -548, -194, -885, 5, 182, -256, -294, 303, 182, -317, 105, 115, -405, 182, -118, -53, 213, 333, -307, 364, 210, 150, -413, 25, 48, -325, 134, 147, 18, 211, -209, -314, -262, 63, -50, -178, 11, 439, 483, 133, 331, 625, 558, -394, -301, -16, -53, -399, 94, -21, -101, -153, -203, -158, 248, 292, -361, -287, 17, -184, 290, -104, -26, 315, 312, -510, 305, -456, 296, -297, 70, -142, -246, -590, -245, -516, 319, -368, -87, -238, 58, -438, -353, -174, 215, 280, -616, 10, -386, 117, -643, 224, -311, -258, -464, -28, 97, 141, 112, -43, -229, 0, 556, 52, -333, -114, -64, 500, 285, 148, 355, -77, 188, 261, 221, 207, -132, -157, -458, 332, -331, -173, 50, 15, -139, -219, -92, 295, 432, -181, -473, -127, 42, 137, 174, -494, -419, -494, -36, 17, 351, 34, 14, -353, -200, 166, -457, -410, -518, -8, 570, -16, 45, -619, -324, -707, -47, -648, -457, 236, 58, 388, -366, -57, -650, -450, -187, -227, 80, -189, -318, -7, -233, 128, -212, 256, -28, 562, -124, 317, -171, 137, 5, -264, 101, -20, 84, 261, 35, -419, 277, 46, -595, -30, 491, 50, 79, 198, -302, -183, -675, 196, -185, -332, -134, -11, 283, 388, 405, -3, -450, 554, 161, 40, 19, 238, 101, -220, -105, -257, 144, -383, -177, 243, 233, 360, -152, -382, -180, -480, 170, -445, -304, 344, 87, -172, 79, -708, -460, -59, -292, -341, 132, 279, -11, 314, 59, 91, -2, 376, -276, -138, 404, 305, 69, -583, -116, -200, -397, 147, 246, 417, -319, 
279, 194, -187, 432, -216, 541, -151, -10, -323, -221, 468, 442, -182, -390, -119, -358, -386, 34, 161, -293, -193, 277, -39, -577, 388, -256, -447, -640, -428, -220, -111, 34, 272, 33, -136, -250, -491, -71, -48, -226, -499, -443, 63, -280, 545, 163, 1, -396, -486, 546, -289, -189, -83, 279, -177, 34, -227, -583, 1, -407, 63, 16, -238, 233, -633, -331, -343, 70, 139, 429, 244, 373, 13, -438, 525, 32, 64, -339, -62, 127, -276, -273, -11, -149, -368, 496, -644, -181, 103, -61, -62, 185, 81, -59, -8, -69, -186, 250, 206, 316, 272, -349, -258, 506, -13, -704, -48, 23, -316, -491, 439, -420, -130, -42, 127, -215, 135, 131, -292, 571, 211, -726, -94, 52, -302, -309, -357, -333, 78, 369, -277, 320, -104, -278, -256, -285, -462, -208, 88, -444, -563, 371, 530, 353, -110, 341, -392, 153, 122, -485, 69, -145, 254, -406, -2, 432, -608, 292, -7, -131, 211, -479, -403, -42, 83, -42, -105, -6, 43, 238, -261, 93, -37, 219, 636, -95, 471, -606, -41, -158, -425, 229, -447, -240, -220, -106, 389, -60, -345, -139, 451, -109, -462, -717, 280, 31, 35, 307, -143, -801, 370, 574, -267, 194, -508, -116, 29, 236, 121, -366, -402, -511, 252, -39, 441, -384, -170, 466, -101, -580, 29, 131, -770, 590, -204, -669, -276, -282, -123, -154, 307, -114, -54, -117, 3, -372, -512, -49, -178, -753, -471, -55, -323, -259, -501, -273, -26, -485, -496, 257, -245, -2, -91, -99, -92, 204, -596, 191, 80, -359, 32, -26, -387, 220, 191, -511, -73, -61, 167, -107, -47, 72, -150, 199, -481, 323, 559, 155, 445, -60, -327, -31, 0, -630, -23, -166, -656, -409, -745, -205, 171, -118, -73, 268, 123, 201, -441, 65, 28, -31, -461, -131, -677, -137, -9, -473, -266, 60, -56, -711, 413, 476, -71, 26, -286, -10, -101, -14, -266, 282, 351, -615, -197, -51, 366, -156, -451, -122, 270, -300, -23, -534, -629, 365, 151, -616, -208, 212, -505, -332, 171, -10, -96, -317, -41, -304, -425, 601, -433, -278, 215, -85, -540, 160, -642, 53, 54, -247, -53, 178, 115, 30, -114, -425, -338, -118, 16, -498, -13, 347, -513, 66, -75, 323, -425, -540, 49, -660, -194, 303, -129, -372, -178, 83, 444, -46, -343, -260, -89, 444, 439, 359, 167, -251, -106, 131, -118, 270, -166, 253, -13, -636, 270, -93, -323, -140, 27, -169, -346, 514, -556, 163, 544, -339, 301, 97, 307, 531, -466, 319, -760, 237, -66, -251, -184, -6, 85, -709, 84, -299, -141, 593, -247, -90, -106, 70, 2, -370, 270, -118, -438, -138, 338, -161, -392, 255, -172, 354, -146, 305, -310, 92, 156, 160, 460, -130, -336, -23, 280, -24, -47, 126, 291, -40, -71, 136, -443, -61, -653, -131, -391, 38, -188, -413, 139, 1, 424, -113, 64, -571, -450, 132, -117, -99, 176, 304, -536, -368, -614, -23, 159, 38, 24, 245, -255, -61, 238, -66, -130, 122, 8, 66, -98, 189, 108, 339, -566, 22, -163, 40, -42, 166, -422, -299, 327, -309, 285, -514, -321, 323, 323, -349, 162, 441, 100, -334, 292, -549, 415, -166, -290, -243, -283, -324, -307, -277, -402, -682, -310, -479, 197, -436, -237, 12, -78, 624, -434, -416, 221, -370, 54, 199, -260, -342, 153, 216, 210, 27, 145, 307, 66, -682, -67, -595, 136, -413, 73, -203, -229, 54, -496, -220, -262, 192, -514, 59, -176, -371, 600, -20, 96, 211, -229, -193, -379, 69, -189, -135, -508, -514, 161, -160, -140, 492, -105, 396, 148, -484, -39, 65, -148, 301, 610, 285, -402, 42, -203, 35, 98, 77, 90, 179, -531, -733, 398, -438, -230, -244, -300, 368, -121, -201, -108, -567, -34, 475, -118, 492, -183, 274, -552, 310, -703, -102, -71, 97, -480, -29, 19, 199, 121, 188, -166, 198, 329, 560, 99, -115, 290, 420, 266, -392, 80, -607, 330, 241, -572, -388, -535, 303, 377, -662, -182, 333, 102, -315, 
-348, 34, 406, -233, -214, 197, -522, -394, 235, 262, -607, -242, -175, -62, -199, 38, -190, -396, -13, 23, -371, 176, 28, 473, 193, -175, -269, -206, 552, 107, -462, -595, -156, 361, 6, -128, 40, 21, 43, -389, -207, -26, 125, -61, 407, -620, 5, -107, 351, 40, -196, -44, 127, -334, -573, -224, 50, 289, 397, 76, -117, -400, 222, 12, 26, 334, -200, -27, 8, -439, 572, 61, -169, 99, -299, -785, -628, -80, 192, 495, 652, 5, 288, 196, 41, -128, -363, -533, 352, -158, -265, 71, 625, 187, -72, -188, -166, -116, -198, -44, 460, -442, 425, -539, -274, -443, -754, -53, -53, 451, -347, 529, -487, -366, -425, -298, -402, 672, 54, -58, -110, 37, -377, 148, 138, 134, -187, -164, 352, -557, -91, -563, -608, -21, -187, 228, 199, 461, -559, -100, 82, -415, -218, -61, 184, 460, -343, -40, -437, -129, -816, 210, -133, -63, -156, 58, 132, -659, -31, -120, 37, 28, -345, 423, 11, -407, -114, -54, -243, 31, -400, -210, -292, 182, 299, 408, -515, -391, 172, -316, -459, 278, -717, 566, 271, -310, 684, -142, -445, -255, 126, -382, -103, -369, 347, 20, -241, 244, -86, -84, -655, 23, 188, -167, -298, -460, -794, -413, -178, 254, -277, -61, 305, 268, -629, 116, -15, -288, 469, 127, -172, -84, -659, -93, -59, 388, -515, -334, -559, -19, 27, -57, -334, -299, -421, 52, 361, -325, -313, -173, 151, -367, 241, -167, -20, -28, 59, -101, 282, 287, 69, 47, 597, -87, -273, 56, 201, 155, 125, 351, 255, -581, 211, 33, 30, -162, -377, 68, -26, -533, 53, -64, -139, 429, -390, -456, -128, 827, 302, -454, -26, -287, 394, 292, -280, -185, 11, 82, 287, -282, 7, 592, -209, -525, -307, 39, -825, -90, -121, -288, 429, -271, -223, 43, 149, 197, 87, 113, -52, -200, -251, 282, 341, -192, -725, -248, -216, 70, -52, 319, 392, -293, 62, -306, -22, -490],
),
}
from typing import List


class Solution:
    def test(self, arr1: list, arr2: list):
        results = []
        for idx in range(len(arr1)):
            if arr1[idx] == "NumMatrix":
                obj = NumMatrix(arr2[idx][0])
                results.append(None)
            else:
                row1, col1, row2, col2 = arr2[idx]
                s = obj.sumRegion(row1, col1, row2, col2)
                results.append(s)
        return results
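# Note (editorial): arr1 mirrors LeetCode's design-problem input — a list of
# operation names, where "NumMatrix" constructs the object and any other name
# is treated as a sumRegion query — while arr2 carries the matching argument
# lists; the returned results line up with the operations index-for-index.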
# TLE: 10, 16471.35
class NumMatrix1:
    def __init__(self, matrix: List[List[int]]):
        self.y = len(matrix)
        self.x = len(matrix[0])
        self.arr = [matrix[j][i] for j in range(self.y) for i in range(self.x)]

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        ret = 0
        for j in range(row1, row2 + 1):
            for i in range(col1, col2 + 1):
                idx = j * self.x + i
                ret += self.arr[idx]
        return ret
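# Note (editorial): the matrix is flattened in row-major order, so element
# (j, i) lives at index j * self.x + i; each query then walks every cell in
# the requested box, which is what makes this version time out on large inputs.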
"""
11 / 11 test cases passed.
Status: Accepted
Runtime: 3184 ms
Memory Usage: 351.6 MB
"""
# Avg: 4110.83
class NumMatrix2:
    def __init__(self, matrix: List[List[int]]):
        row = len(matrix)
        col = len(matrix[0])
        self.hash = {}
        for r in range(row):
            self.hash[r] = dict((f'{i}-{j}', sum(matrix[r][i:j+1]))
                                for i in range(col) for j in range(i, col))

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        ret = 0
        col = f'{col1}-{col2}'
        for row in range(row1, row2+1):
            ret += self.hash[row][col]
        return ret
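# Note (editorial): this variant precomputes, for every row, the sum of every
# contiguous column slice (i, j) — O(cols^2) entries per row — so each query
# drops to O(rows) dictionary lookups at the price of a heavy precompute.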
"""
11 / 11 test cases passed.
Status: Accepted
Runtime: 108 ms
Memory Usage: 17.6 MB
"""
# Avg: 16.07
class NumMatrix:
    def __init__(self, matrix: List[List[int]]):
        h = len(matrix)
        w = len(matrix[0])
        # 2-D prefix sums, padded with a zero row and column so that
        # arr[r][c] holds the sum of the r x c submatrix anchored at (0, 0).
        self.arr = [[0] * (w + 1) for _ in range(h + 1)]
        for r in range(1, h + 1):
            for c in range(1, w + 1):
                self.arr[r][c] = self.arr[r-1][c] + self.arr[r][c-1] - \
                    self.arr[r-1][c-1] + matrix[r-1][c-1]

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        # Inclusion-exclusion over the four prefix corners of the query box.
        ret = self.arr[row2+1][col2+1] - self.arr[row2+1][col1] - \
            self.arr[row1][col2+1] + self.arr[row1][col1]
        return ret
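# Editorial sketch (not part of the original submission): a minimal self-check
# that replays a hypothetical operation list through the harness above and
# cross-checks sumRegion against direct nested-loop sums. The op list and the
# small matrix here are made up purely for illustration.
if __name__ == "__main__":
    grid = [[3, 0, 1], [5, 6, 3], [1, 2, 0]]
    ops = ["NumMatrix", "sumRegion", "sumRegion"]
    args = [[grid], [0, 0, 1, 1], [1, 1, 2, 2]]
    got = Solution().test(ops, args)
    # Direct sums over the same two query boxes, for comparison.
    want = [None,
            sum(grid[r][c] for r in range(0, 2) for c in range(0, 2)),
            sum(grid[r][c] for r in range(1, 3) for c in range(1, 3))]
    assert got == want, (got, want)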
| 1,191.859375 | 146,847 | 0.550846 | 59,890 | 228,837 | 2.104525 | 0.017332 | 0.625944 | 0.938059 | 1.249603 | 0.528094 | 0.365535 | 0.323627 | 0.31659 | 0.315828 | 0.315828 | 0 | 0.383684 | 0.029056 | 228,837 | 191 | 146,848 | 1,198.099476 | 0.183583 | 0.001368 | 0 | 0.222973 | 0 | 0 | 0.173227 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047297 | false | 0 | 0.006757 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8651d337582f987848a8b356298e8fd8167b03fe | 85,696 | py | Python | core_utilities/plotting.py | Jon-Webb-79/Core-Utilities | c2d5987418e543360bf844fb3c23f31e7482e71f | ["BSD-2-Clause"] | null | null | null | core_utilities/plotting.py | Jon-Webb-79/Core-Utilities | c2d5987418e543360bf844fb3c23f31e7482e71f | ["BSD-2-Clause"] | null | null | null | core_utilities/plotting.py | Jon-Webb-79/Core-Utilities | c2d5987418e543360bf844fb3c23f31e7482e71f | ["BSD-2-Clause"] | null | null | null |
# Import necessary packages here
from typing import List
import warnings
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.dates as mdates
from matplotlib import rc, pyplot as plt
# ============================================================================
# ============================================================================
# Date: December 18, 2020
# Purpose: This file contains classes and functions necessary for
# plotting.
# Source Code Metadata
__author__ = "Jonathan A. Webb"
__copyright__ = "Copyright 2020, Jon Webb Inc."
__version__ = "1.0"
# ============================================================================
# ============================================================================
def text_date_plot(dates: List[List[str]], y_data: List[List[float]],
                   line_colors: List[str], line_style: List[str],
                   line_weight: List[float], x_label: str, y_label: str,
                   dat_labels: List[str], label_pos: str, y_scale: str = 'LIN',
                   plot_name: str = 'NULL', save: bool = False,
                   label_font_size: int = 18, tick_font_size: int = 18,
                   style_name: str = 'default', title: str = 'NULL',
                   title_font_size: int = 24) -> None:
"""
:param dates: A list of lists, where each inner list contains a list of dates
as a text string in the format YYYY-MM-DD or YYYY/MM/DD
:param y_data: A list of lists containing y-axis data corresponding to the
list of lists in `dates`
    :param line_colors: A list of line colors, one for each curve.
                        Acceptable line color indicators can be found in the
                        documentation for
                        `matplotlib colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`_.
:param line_style: A list of line styles, one for each curve. Acceptable line
styles can be found in documentation for
`matplotlib style <https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html>`_.
:param line_weight: A list of line weights, one for each curve.
:param x_label: The x-axis label
:param y_label: The y-axis label
:param dat_labels: A list of labels, one for each curve
:param label_pos: The position of the label in the plot, examples might be
``upper left``, ``lower right``.
:param y_scale: 'LOG' or 'LIN' for logarithmic or linear scale
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param save: True or False, defaulted to False
:param label_font_size: The font size for plot labels, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return None:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
to produce single plots of one or multiple data sets as a function of date. This function assumes that the
    date string is in the format of a text string and not a Timestamp or datetime. This function also autonomously
determines the appropriate date display format. If you desire plots as a
function of time you should use the ``text_time_plot`` function. The function can be used in the
following manner;
.. code-block:: python
> # Use stock data for example
> tickers = ['AAPL', 'WMT']
> data = yf.download(tickers, '2015-1-1')['Adj Close']
> # transform Timestamps to string
> dates = list(data.index.strftime('%Y-%m-%d'))
> date_list = [dates, dates]
> y_list = [list(data[tickers[0]]), list(data[tickers[1]])]
> colors = ['red', 'green']
> line_style = ['-', '-']
> weight = [1.0, 1.0]
> text_date_plot(date_list, y_list, colors, line_style, weight, 'Date',
'$', tickers, 'upper left')
.. image:: date.eps
:align: center
"""
# Adjust format for YYYY/MM/DD to YYYY-MM-DD
outer_list = []
for i in range(len(dates)):
inner_list = []
for j in range(len(dates[i])):
year = dates[i][j][0:4]
month = dates[i][j][5:7]
day = dates[i][j][8:10]
date_string = year + '-' + month + '-' + day
inner_list.append(datetime.strptime(date_string, '%Y-%m-%d'))
outer_list.append(inner_list)
# Determine time difference between min and max point
days = 0
for i in outer_list:
delta = (max(i) - min(i)).days
if delta > days:
days = delta
# Start plot
fig, td_plot = plt.subplots()
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
if days <= 15:
myfmt = mdates.DateFormatter('%d')
td_plot.xaxis.set_major_locator(mdates.DayLocator())
elif days <= 180:
myfmt = mdates.DateFormatter('%b-%y')
td_plot.xaxis.set_major_locator(mdates.MonthLocator())
else:
myfmt = mdates.DateFormatter('%b-%y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(4))
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
td_plot.xaxis.set_major_formatter(myfmt)
for i in range(len(outer_list)):
td_plot.plot(outer_list[i], y_data[i], color=line_colors[i],
label=dat_labels[i], linewidth=line_weight[i],
linestyle=line_style[i])
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
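# ----------------------------------------------------------------------------
# Added usage sketch (assumption: illustrative helper, not in the original
# module). The docstring example above assumes yfinance imported as ``yf``;
# this variant is dependency free and shows the plain string-date input that
# text_date_plot expects. Call it manually to display the plot.
def _demo_text_date_plot() -> None:
    dates = ['2020-01-%02d' % day for day in range(1, 11)]
    values = [float(day) for day in range(1, 11)]
    text_date_plot([dates], [values], ['red'], ['-'], [1.0],
                   'Date', 'Value', ['demo'], 'upper left')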
# ----------------------------------------------------------------------------
def two_d_line_matplot(x_data: List[List[float]], y_data: List[List[float]],
line_colors: List[str], line_style: List[str],
                       line_weight: List[float], x_label: str, y_label: str,
dat_labels: List[str], label_pos: str, x_scale: str = 'LIN',
y_scale: str = 'LIN', plot_name: str = 'NULL',
save: bool = False, label_font_size: int = 18,
tick_font_size: int = 18, style_name: str = 'default',
title: str = 'NULL', title_font_size: int = 24) -> None:
"""
:param x_data: A list of lists, where the inner lists contain data points
for the x-axis
:param y_data: A list of lists, where the inner lists contain data points
for the y-axis
    :param line_colors: A list of line colors, one for each curve.
                        Acceptable line color indicators can be found in the
                        documentation for
                        `matplotlib colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`_.
:param line_style: A list of line styles, one for each curve. Acceptable line
styles can be found in documentation for
`matplotlib style <https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html>`_.
:param line_weight: A list of line weights, one for each curve.
:param x_label: The label for the x-axis
:param y_label: The label for the y-axis
:param dat_labels: A list of labels, one for each curve
:param label_pos: The position of the label in the plot, examples might be
``upper left``, ``lower right``.
:param x_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param y_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param save: True or False, defaulted to False
:param label_font_size: The font size for plot labels, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return None:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
to produce single plots of one or multiple data sets. This function will only produce line plots and
not scatter plots or a combination of both. The function can be used in the following manner;
.. code-block:: python
> x_dat = np.linspace(0, 10, 15)
> y1_dat = x_dat
> y2_dat = x_dat ** 2.0
> y3_dat = x_dat ** 3.0
> x_list = [x_dat, x_dat, x_dat]
> y_list = [y1_dat, y2_dat, y3_dat]
> colors = ['red', 'blue', 'black']
> line_style = ['-', '-', '--']
> labels = ['linear', 'squared', 'cubed']
> weight = [1, 2, 3]
> two_d_line_matplot(x_list, y_list, colors, line_style, weight, 'x-data',
'y-data', labels, 'upper left')
.. image:: line_plot.eps
:scale: 90%
:align: center
"""
# Error checking and warnings
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if len(x_data) != len(y_data):
warnings.warn('length of x list of lists is not the same as y list of lists, plot not printed')
return
if len(line_colors) != len(x_data):
warnings.warn('line colors list not the same length as data lists, plot not printed')
return
if len(line_style) != len(x_data):
warnings.warn('line_style list not the same length as data lists, plot not printed')
return
if len(line_weight) != len(x_data):
warnings.warn('line_weight list not the same length as data lists, plot not printed')
return
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
        warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(line_colors)):
td_plot.plot(x_data[i], y_data[i], color=line_colors[i],
label=dat_labels[i], linewidth=line_weight[i],
linestyle=line_style[i])
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
# ----------------------------------------------------------------------------
def two_d_scatter_matplot(x_data: List[List[float]], y_data: List[List[float]],
marker_colors: List[str], marker_style: List[str],
x_label: str, y_label: str, dat_labels: List[str],
label_pos: str, x_scale: str = 'LIN',
y_scale: str = 'LIN', plot_name: str = 'NULL',
save: bool = False, label_font_size: int = 18,
tick_font_size: int = 18, style_name: str = 'default',
title: str = 'NULL', title_font_size: int = 24) -> None:
"""
:param x_data: A list of lists, where the inner lists contain data points
for the x-axis
:param y_data: A list of lists, where the inner lists contain data points
for the y-axis
    :param marker_colors: A list of marker colors, one for each curve.
                          Acceptable color indicators can be found in documentation
                          for `matplotlib colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`_.
    :param marker_style: A list of marker styles, one for each curve. Acceptable
                         marker styles can be found in documentation for
                         `matplotlib markers <https://matplotlib.org/stable/api/markers_api.html>`_.
:param x_label: The label for the x-axis
:param y_label: The label for the y-axis
:param dat_labels: A list of labels, one for each curve
:param label_pos: The position of the label in the plot, examples might be
``upper left``, ``lower right``
:param x_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param y_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param save: True or False, defaulted to False
:param label_font_size: The font size for plot labels, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return None:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
    to produce single plots of one or multiple data sets. This function will only produce scatter
    plots and not line plots or a combination of both. The function can be used in the following manner;
.. code-block:: python
> x_dat = np.linspace(0, 10, 15)
> y1_dat = x_dat
> y2_dat = x_dat ** 2.0
> y3_dat = x_dat ** 3.0
> x_list = [x_dat, x_dat, x_dat]
> y_list = [y1_dat, y2_dat, y3_dat]
    > colors = ['red', 'blue', 'black']
    > marker_style = ['o', '^', 'd']
    > labels = ['linear', 'squared', 'cubed']
    > two_d_scatter_matplot(x_list, y_list, colors, marker_style, 'x-data',
                            'y-data', labels, 'upper left')
.. image:: scatter_plot.eps
:align: center
"""
# Error checking and warnings
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if len(x_data) != len(y_data):
warnings.warn('length of x list of lists is not the same as y list of lists, plot not printed')
return
if len(marker_colors) != len(x_data):
warnings.warn('line colors list not the same length as data lists, plot not printed')
return
if len(marker_style) != len(x_data):
warnings.warn('line_style list not the same length as data lists, plot not printed')
return
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
        warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(marker_colors)):
td_plot.plot(x_data[i], y_data[i], color=marker_colors[i],
label=dat_labels[i], marker=marker_style[i],
linestyle=' ')
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
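# ----------------------------------------------------------------------------
# Added usage sketch (assumption: illustrative helper, not in the original
# module): a runnable version of the docstring example for
# two_d_scatter_matplot, passing marker styles since this function draws
# markers only.
def _demo_two_d_scatter_matplot() -> None:
    x_dat = np.linspace(0, 10, 15)
    x_list = [x_dat, x_dat, x_dat]
    y_list = [x_dat, x_dat ** 2.0, x_dat ** 3.0]
    two_d_scatter_matplot(x_list, y_list, ['red', 'blue', 'black'],
                          ['o', '^', 'd'], 'x-data', 'y-data',
                          ['linear', 'squared', 'cubed'], 'upper left')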
# ----------------------------------------------------------------------------
def two_d_scatter_line_matplot(x_data: List[List[float]], y_data: List[List[float]],
marker_colors: List[str], marker_style: List[str],
                               line_style: List[str], line_weight: List[float],
x_label: str, y_label: str, dat_labels: List[str],
label_pos: str, x_scale: str = 'LIN',
y_scale: str = 'LIN', plot_name: str = 'NULL',
save: bool = False, label_font_size: int = 18,
tick_font_size: int = 18, style_name: str = 'default',
title: str = 'NULL', title_font_size: int = 24) -> None:
"""
:param x_data: A list of lists, where the inner lists contain data points
for the x-axis
:param y_data: A list of lists, where the inner lists contain data points
for the y-axis
    :param marker_colors: A list of marker colors, one for each curve.
                          Acceptable color indicators can be found in documentation
                          for `matplotlib colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`_.
    :param marker_style: A list of marker styles, one for each curve. Acceptable
                         marker styles can be found in documentation for
                         `matplotlib markers <https://matplotlib.org/stable/api/markers_api.html>`_.
:param line_style: A list of line styles, one for each curve. Acceptable line
styles can be found in documentation for
`matplotlib style <https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html>`_.
:param line_weight: A list of line weights, one for each curve.
:param x_label: The label for the x-axis
:param y_label: The label for the y-axis
:param dat_labels: A list of labels, one for each curve
:param label_pos: The position of the label in the plot, examples might be
``upper left``, ``lower right``
:param x_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param y_scale: LOG or LIN for logarithmic or linear, defaulted to LIN
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param save: True or False, defaulted to False
:param label_font_size: The font size for plot labels, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return None:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
    to produce single plots of one or multiple data sets drawn as markers overlaid with lines, i.e. a
    combination of scatter and line plots. The function can be used in the following manner;
.. code-block:: python
> x_dat = np.linspace(0, 10, 15)
> y1_dat = x_dat
> y2_dat = x_dat ** 2.0
> y3_dat = x_dat ** 3.0
> x_list = [x_dat, x_dat, x_dat]
> y_list = [y1_dat, y2_dat, y3_dat]
> colors = ['red', 'blue', 'black']
> line_style = ['-', '-', '--']
> labels = ['linear', 'squared', 'cubed']
> weight = [1, 2, 3]
> marker_style = ['^', 'o', 'd']
> two_d_scatter_line_matplot(x_list, y_list, colors, marker_style,
line_style, weight, 'x-axis', 'y-axis',
labels, 'upper left', save=True, plot_name=plt_name)
.. image:: line_mark.eps
:align: center
"""
# Error checking and warnings
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if len(x_data) != len(y_data):
warnings.warn('length of x list of lists is not the same as y list of lists, plot not printed')
return
if len(marker_colors) != len(x_data):
warnings.warn('line colors list not the same length as data lists, plot not printed')
return
if len(marker_style) != len(x_data):
warnings.warn('line_style list not the same length as data lists, plot not printed')
return
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
        warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(marker_colors)):
td_plot.plot(x_data[i], y_data[i], color=marker_colors[i],
label=dat_labels[i], marker=marker_style[i],
linestyle=line_style[i], linewidth=line_weight[i])
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
# ----------------------------------------------------------------------------
def one_d_histogram_plot(data: List[List[float]], labels: List[List[str]],
x_label: str, y_label: str, colors: List[str],
edge_colors: List[str], shading: List[float],
label_pos: str, num_bins: int = 50, tick_font_size: int = 18,
                         label_font_size: int = 18, style_name: str = 'default',
save: bool = False, plot_name: str = 'NULL',
hist_type: str = 'bar', dens: bool = False,
title: str = 'NULL', title_font_size: int = 24) -> None:
"""
:param data: A list of lists containing data for one or multiple
distributions
:param labels: A list of labels, one for each distribution
:param x_label: The label for the x-axis
:param y_label: The label for the y-axis
:param colors: The fill colors for each ``bar`` plot. If a ``step`` plot
is selected, this input is irrelevant, but data must still be
passed to the function.
:param edge_colors: The colors for the edge of each bar or step plot
    :param shading: The level of transparency for the bar plot fill. A value of
                    0 is invisible; 1 is the maximum color density
:param label_pos: Where in the plot, the labels for each curve are to be
placed. ``upper left`` or ``lower right`` are examples.
:param num_bins: The number of bins to be plotted, defaulted to 50
:param tick_font_size: The size for each tick, defaulted to 18
:param label_font_size: The size for printed font, defaulted to 18
:param style_name: The plot style to be used. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
defaulted to ``default``
:param save: True or False, defaulted to False
:param plot_name: The plot name and path-link, if the user wants to save the
plot. If not, the variable is defaulted to ``NULL``
:param hist_type: {``bar``, ``barstacked``, ``step``, ``stepfilled``}
See
`histogram <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.hist.html>`_
for more information.
:param dens: If True, the first element of the return tuple will be the counts
normalized to form a probability density, i.e., the area (or integral)
under the histogram will sum to 1
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
    :param title_font_size: The font size for the title, defaulted to 24
:return:
This function utilizes the matplotlib
`subplots <https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html>`_ functionality
    to produce single histogram plots or multiple overlaid plots. The function can be used in the following manner;
.. code-block:: python
> np.random.seed(19680801)
> x = np.random.normal(15.0, 3.0, 1000)
> y = np.random.normal(20.0, 3.0, 1000)
> data = [x, y]
> labels = ['one', 'two']
> colors = ['blue', 'green']
> edge_colors = ['black', 'black']
> alpha = [0.9, 0.2]
> x_label = 'x-axis'
> y_label = 'y-axis'
> one_d_histogram_plot(data, labels, x_label, y_label, colors, edge_colors,
alpha, 'upper left', num_bins=50, hist_type='step',
dens=True)
.. image:: hist1.eps
:align: center
The plot parameters can be changed to produce a normalized plot, only
showing the histogram outline with the following code.
.. code-block:: python
> np.random.seed(19680801)
> x = np.random.normal(15.0, 3.0, 1000)
> y = np.random.normal(20.0, 3.0, 1000)
> data = [x, y]
> labels = ['one', 'two']
> colors = ['black', 'red']
> edge_colors = ['black', 'red']
> alpha = [1.0, 1.0]
> x_label = 'x-axis'
> y_label = 'y-axis'
> one_d_histogram_plot(data, labels, x_label, y_label, colors, edge_colors,
alpha, 'upper left', num_bins=50)
.. image:: hist2.eps
:align: center
"""
    if len(labels) != len(data):
        warnings.warn("data list should be the same length as the labels list")
        return
    if len(labels) != len(colors):
        warnings.warn("labels list should be the same length as the colors list")
        return
    if len(labels) != len(edge_colors):
        warnings.warn("labels list should be the same length as the edge_colors list")
        return
    if len(labels) != len(shading):
        warnings.warn("labels list should be the same length as the shading list")
        return
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.15)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
plt.xlabel(x_label, fontsize=label_font_size)
plt.ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
plt.title(title, fontsize=title_font_size)
for i in range(len(labels)):
plt.hist(data[i], bins=num_bins, color=colors[i], edgecolor=edge_colors[i],
alpha=shading[i], label=labels[i], histtype=hist_type, density=dens)
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
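# ----------------------------------------------------------------------------
# Added usage sketch (assumption: illustrative helper, not in the original
# module): a runnable version of the docstring's step-histogram example with
# density normalization enabled.
def _demo_one_d_histogram_plot() -> None:
    np.random.seed(19680801)
    data = [np.random.normal(15.0, 3.0, 1000),
            np.random.normal(20.0, 3.0, 1000)]
    one_d_histogram_plot(data, ['one', 'two'], 'x-axis', 'y-axis',
                         ['blue', 'green'], ['black', 'black'], [0.9, 0.2],
                         'upper left', num_bins=50, hist_type='step',
                         dens=True)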
# ================================================================================
# ================================================================================
class MatPlotDataFrame:
"""
:param df: Dataframe containing columnar data to be plotted
This class will plot user specified data from a pandas dataframe
"""
def __init__(self, df: pd.DataFrame):
self.df = df
self.colors = ['lightgrey', 'deepskyblue', 'sandybrown',
'teal', 'limegreen', 'coral',
'hotpink', 'magenta', 'red',
'white', 'gold', 'darkgreen',
                       'turquoise', 'olive', 'orange',
'mediumvioletred', 'purple' , 'darkred']
self.styles = ['o' for i in range(len(self.colors))]
# --------------------------------------------------------------------------------
def scatter_plot_parse_column(self, x_header: str, y_header: str, parsing_header: str,
column_values: List[str], style_name: str='default',
marker_colors: List[str]=['None'], marker_style: List[str]=['None'],
fill_alpha: np.float32=0.7, edge_color: str='black', x_label: str='',
y_label: str='', title: str='', label_pos: str='upper right',
x_scale: str='LIN', y_scale: str='LIN', plot_name: str='NULL',
save: bool=False, label_font_size: int=18,
tick_font_size: int=18, title_font_size: int=24,
marker_size: int=35, marker_edge_width: np.float32=0.8,
grid: bool=False, grid_style='-', grid_color='grey') -> None:
"""
:param x_header: The title of the dataframe column containing the x-axis
data sets
:param y_header: The title of the dataframe column containing the y-axis
data sets
:param parsing_header: The title of the dataframe column containing the
values which will be used to parse the dataframe into
one or multiple data sets
:param column_values: The values contained in the parsing_header column
that will be used to parse the data set into
multiple data sets
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param marker_colors: A list of marker colors, where each marker color
corresponds to each data set. This parameter has a
                              default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param marker_style: A list of marker styles, where each marker style corresponds
to a data set. This parameter has a default list of 18 circle
marker styles that the user can override. Marker styles
can be found at :href `marker style<https://matplotlib.org/stable/api/markers_api.html>`
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param edge_color: The color of the line surrounding the marker
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will parse a dataframe column based on a user specified
        value or list of values, and plot the user specified x- and y-axis
        columns for each filtered subset. As an example, consider
        a dataframe with the following columnar data structure.
.. code-block:: python
> length = 20
> x = np.linspace(0, length, num=length)
> linear = x
> squared = x ** 2.0
> lin = np.repeat('linear', length)
> sq = np.repeat('squared', length)
> # Combine arrays into one
> x = np.hstack((x, x))
> y = np.hstack((linear, squared))
> power = np.hstack((lin, sq))
> # Create dataframe
> dictionary = {'x': x, 'y': y, 'power': power}
> df = pd.DataFrame(dictionary)
> # Plot data
> obj = MatPlotDataFrame(df)
> parsing_header = 'power'
> column_values = ['linear', 'squared']
        obj.scatter_plot_parse_column('x', 'y', parsing_header,
                                      column_values,
                                      marker_colors=['red', 'green'],
                                      marker_style=['o', '^'],
                                      label_pos='upper left')
.. image:: mat_scatter_test1.eps
:align: center
"""
df_list = [self.df[self.df[parsing_header] == col_val] for
col_val in column_values]
# Error checking
if marker_colors[0] == 'None':
marker_colors = self.colors
if len(marker_colors) < len(column_values):
msg1 = 'FATAL ERROR: The length of the marker color list must be as '
msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if marker_style[0] == 'None':
marker_style = self.styles
if len(marker_style) < len(column_values):
            msg1 = 'FATAL ERROR: The length of the marker style list must be as '
            msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(df_list)):
td_plot.scatter(df_list[i][x_header], df_list[i][y_header],
label=column_values[i], marker=marker_style[i],
color=marker_colors[i], alpha=fill_alpha,
edgecolors=edge_color, s=marker_size,
linewidth=marker_edge_width)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
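    # ----------------------------------------------------------------------------
    # Added usage sketch (assumption: illustrative helper, not in the original
    # class): builds the docstring's example dataframe and shows how the
    # parsing column splits one dataframe into two plotted series.
    @staticmethod
    def _demo_scatter_plot_parse_column() -> None:
        x = np.linspace(0, 20, num=20)
        power = np.hstack((np.repeat('linear', 20), np.repeat('squared', 20)))
        df = pd.DataFrame({'x': np.hstack((x, x)),
                           'y': np.hstack((x, x ** 2.0)),
                           'power': power})
        obj = MatPlotDataFrame(df)
        obj.scatter_plot_parse_column('x', 'y', 'power', ['linear', 'squared'],
                                      marker_colors=['red', 'green'],
                                      marker_style=['o', '^'],
                                      label_pos='upper left')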
# --------------------------------------------------------------------------------
def scatter_plot_columns(self, x_headers: List[str], y_headers: List[str],
labels: List[str], style_name: str='default',
marker_colors: List[str]=['None'],
marker_style: List[str]=['None'], fill_alpha: np.float32=0.7,
edge_color: str='black', x_label: str='', y_label: str='',
title: str='', label_pos: str='upper right', x_scale: str='LIN',
y_scale: str='LIN', plot_name: str='NULL', save: bool=False,
label_font_size: int=18, tick_font_size: int=18,
title_font_size: int=24, marker_size: int=35,
marker_edge_width: np.float32=0.8, grid: bool=False,
                             grid_style='-', grid_color='grey') -> None:
"""
:param x_headers: The title of the dataframe columns containing the x-axis
data sets
:param y_headers: The title of the dataframe columns containing the y-axis
data sets
:param labels: A list of the label names for each data set
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param marker_colors: A list of marker colors, where each marker color
corresponds to each data set. This parameter has a
                              default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param marker_style: A list of marker styles, where each marker style corresponds
to a data set. This parameter has a default list of 18 circle
marker styles that the user can override. Marker styles
can be found at :href `marker style<https://matplotlib.org/stable/api/markers_api.html>`
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param edge_color: The color of the line surrounding the marker
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will plot user defined dataframe columns for the x and
        y axis of a 2-d plot as a scatter plot.
.. code-block:: python
> length = 20
> x = np.linspace(0, 20, num=20)
> linear = x
> squared = x ** 2.0
> # create dataframe
> dictionary = {'x': x, 'linear': linear, 'squared': squared}
> df = pd.DataFrame(dictionary)
> # plot data
> obj = MatPlotDataFrame(df)
> x_headers = ['x', 'x']
> y_headers = ['linear', 'squared']
> obj.scatter_plot_columns(x_headers, y_headers, y_headers,
x_label='x-axis', y_label='y-axis', title='Test',
                                  style_name='default', marker_colors=['red', 'green'],
fill_alpha=0.7, marker_style=['o', '^'],
label_pos='upper left', grid=False, save=True,
plot_name=plt_name)
.. image:: mat_scatter_test2.eps
:align: center
"""
# Error checking
if marker_colors[0] == 'None':
marker_colors = self.colors
if len(x_headers) != len(y_headers):
sys.exit('FATAL ERROR: x and y arrays must be the same size')
if marker_style[0] == 'None':
marker_style = self.styles
if len(marker_style) < len(x_headers):
            msg1 = 'FATAL ERROR: The length of the marker style list must be as '
            msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(x_headers)):
td_plot.scatter(self.df[x_headers[i]], self.df[y_headers[i]],
label=labels[i], marker=marker_style[i],
color=marker_colors[i], alpha=fill_alpha,
edgecolors=edge_color, s=marker_size,
linewidth=marker_edge_width)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def line_plot_parse_column(self, x_header: str, y_header: str, parsing_header: str,
column_values: List[str], style_name: str='default',
line_colors: List[str]=['None'], line_weight: np.float32=2.0,
fill_alpha: np.float32=0.7, line_style: str='-', x_label: str='',
y_label: str='', title: str='', label_pos: str='upper right',
x_scale: str='LIN', y_scale: str='LIN', plot_name: str='NULL',
save: bool=False, label_font_size: int=18,
tick_font_size: int=18, title_font_size: int=24,
marker_size: int=35, marker_edge_width: np.float32=0.8,
grid: bool=False, grid_style='-', grid_color='grey') -> None:
"""
:param x_header: The title of the dataframe column containing the x-axis
data sets
:param y_header: The title of the dataframe column containing the y-axis
data sets
:param parsing_header: The title of the dataframe column containing the
values which will be used to parse the dataframe into
one or multiple data sets
:param column_values: The values contained in the parsing_header column
that will be used to parse the data set into
multiple data sets
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param line_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param line_weight: The weight corresponding to the line thickness, defaulted to 2.0
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will parse a dataframe column based on a user specified
        value or list of values, and plot the user specified x- and y-axis
        columns for each filtered subset. As an example, consider
        a dataframe with the following columnar data structure.
.. code-block:: python
> length = 20
> x = np.linspace(0, length, num=length)
> linear = x
> squared = x ** 2.0
> lin = np.repeat('linear', length)
> sq = np.repeat('squared', length)
> # Combine arrays into one
> x = np.hstack((x, x))
> y = np.hstack((linear, squared))
> power = np.hstack((lin, sq))
> # Create dataframe
> dictionary = {'x': x, 'y': y, 'power': power}
> df = pd.DataFrame(dictionary)
> # Plot data
> obj = MatPlotDataFrame(df)
> parsing_header = 'power'
> column_values = ['linear', 'squared']
        obj.line_plot_parse_column('x', 'y', parsing_header,
                                   column_values,
                                   line_colors=['red', 'green'],
                                   label_pos='upper left')
.. image:: line_scatter_test1.eps
:align: center
"""
df_list = [self.df[self.df[parsing_header] == col_val] for
col_val in column_values]
# Error checking
if line_colors[0] == 'None':
line_colors = self.colors
if len(line_colors) < len(column_values):
            msg1 = 'FATAL ERROR: The length of the line color list must be as '
            msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(df_list)):
td_plot.plot(df_list[i][x_header], df_list[i][y_header],
label=column_values[i], linestyle=line_style,
color=line_colors[i], linewidth=line_weight)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
    def line_plot_columns(self, x_headers: List[str], y_headers: List[str], labels: List[str],
style_name: str='default', line_colors: List[str]=['None'],
line_weight: np.float32=2.0, fill_alpha: np.float32=0.7,
line_style: str='-', x_label: str='', y_label: str='',
title: str='', label_pos: str='upper right', x_scale: str='LIN',
y_scale: str='LIN', plot_name: str='NULL', save: bool=False,
label_font_size: int=18, tick_font_size: int=18,
title_font_size: int=24, marker_size: int=35,
marker_edge_width: np.float32=0.8, grid: bool=False,
grid_style='-', grid_color='grey') -> None:
"""
:param x_headers: The title of the dataframe columns containing the x-axis
data sets
:param y_headers: The title of the dataframe columns containing the y-axis
data sets
:param labels: A list containing the name of each label
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param line_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param line_weight: The weight corresponding to the line thickness, defaulted to 2.0
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will plot user defined dataframe columns for the x and
        y axis of a 2-d plot as a line plot.
.. code-block:: python
> length = 20
> x = np.linspace(0, 20, num=20)
> linear = x
> squared = x ** 2.0
> # create dataframe
> dictionary = {'x': x, 'linear': linear, 'squared': squared}
> df = pd.DataFrame(dictionary)
> # plot data
> obj = MatPlotDataFrame(df)
> x_headers = ['x', 'x']
> y_headers = ['linear', 'squared']
> obj.line_plot_columns(x_headers, y_headers, y_headers,
x_label='x-axis', y_label='y-axis', title='Test',
                               style_name='default', line_colors=['red', 'green'],
                               fill_alpha=0.7,
label_pos='upper left', grid=False, save=True,
plot_name=plt_name)
.. image:: line_scatter_test2.eps
:align: center
"""
# Error checking
if line_colors[0] == 'None':
line_colors = self.colors
if len(line_colors) < len(labels):
            msg1 = 'FATAL ERROR: The length of the line color list must be as '
            msg2 = 'large or larger than the size of the labels list'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
for i in range(len(x_headers)):
td_plot.plot(self.df[x_headers[i]], self.df[y_headers[i]],
label=labels[i], linestyle=line_style,
color=line_colors[i], linewidth=line_weight)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def timedate_plot_parse_column(self, x_header: str, y_header: str, parsing_header: str,
column_values: List[str], style_name: str='default',
line_colors: List[str]=['None'], line_weight: np.float32=2.0,
fill_alpha: np.float32=0.7, line_style: str='-', x_label: str='',
y_label: str='', title: str='', label_pos: str='upper right',
x_scale: str='LIN', y_scale: str='LIN', plot_name: str='NULL',
save: bool=False, label_font_size: int=18,
tick_font_size: int=18, title_font_size: int=24,
marker_size: int=35, marker_edge_width: np.float32=0.8,
                                   grid: bool=False, grid_style='-', grid_color='grey') -> None:
"""
:param x_header: The title of the dataframe column containing the x-axis
                         data sets. It is assumed that the x axis is the datetime
axis for this plot.
:param y_header: The title of the dataframe column containing the y-axis
data sets
:param parsing_header: The title of the dataframe column containing the
values which will be used to parse the dataframe into
one or multiple data sets
:param column_values: The values contained in the parsing_header column
that will be used to parse the data set into
multiple data sets
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param line_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param line_weight: The weight corresponding to the line thickness, defaulted to 2.0
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will parse a dataframe column based on a user specified
        value or list of values, and plot the user specified x- and y-axis
        columns for each filtered subset. As an example, consider
        a dataframe with the following columnar data structure.
.. code-block:: python
        > length = 20
        > dates = pd.date_range(start='2021-01-01', periods=length, freq='D')
        > linear = np.linspace(0, length, num=length)
        > squared = linear ** 2.0
        > lin = np.repeat('linear', length)
        > sq = np.repeat('squared', length)
        > # Combine arrays into one
        > x = np.hstack((dates, dates))
        > y = np.hstack((linear, squared))
        > power = np.hstack((lin, sq))
        > # Create dataframe
        > dictionary = {'dates': x, 'y': y, 'power': power}
        > df = pd.DataFrame(dictionary)
        > # Plot data
        > obj = MatPlotDataFrame(df)
        > parsing_header = 'power'
        > column_values = ['linear', 'squared']
        obj.timedate_plot_parse_column('dates', 'y', parsing_header,
                                       column_values,
                                       line_colors=['red', 'green'],
                                       label_pos='upper left')
.. image:: line_scatter_test1.eps
:align: center
"""
max_date = self.df[x_header].max()
min_date = self.df[x_header].min()
diff = (max_date - min_date) / np.timedelta64(1, 'D')
df_list = [self.df[self.df[parsing_header] == col_val] for
col_val in column_values]
df_list = [df.set_index(x_header) for df in df_list]
# Error checking
if line_colors[0] == 'None':
line_colors = self.colors
if len(line_colors) < len(column_values):
            msg1 = 'FATAL ERROR: The length of the line color list must be as '
            msg2 = 'large or larger than the size of the column values'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
if diff <= 2:
myfmt = mdates.DateFormatter('%H')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(6))
elif diff <= 15:
myfmt = mdates.DateFormatter('%b-%d')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(6))
elif diff <= 180:
myfmt = mdates.DateFormatter('%b-%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
elif diff <= 2191:
myfmt = mdates.DateFormatter('%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
else:
myfmt = mdates.DateFormatter('%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
td_plot.xaxis.set_major_formatter(myfmt)
for i in range(len(df_list)):
td_plot.plot(df_list[i].index, df_list[i][y_header],
label=column_values[i], linestyle=line_style,
color=line_colors[i], linewidth=line_weight)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
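    # ----------------------------------------------------------------------------
    # Added usage sketch (assumption: illustrative helper, not in the original
    # class): the x column must hold datetimes here, since the method selects
    # its tick format from the span of the data in days.
    @staticmethod
    def _demo_timedate_plot_parse_column() -> None:
        dates = pd.date_range(start='2021-01-01', periods=20, freq='D')
        linear = np.linspace(0, 20, num=20)
        power = np.hstack((np.repeat('linear', 20), np.repeat('squared', 20)))
        df = pd.DataFrame({'dates': np.hstack((dates, dates)),
                           'y': np.hstack((linear, linear ** 2.0)),
                           'power': power})
        obj = MatPlotDataFrame(df)
        obj.timedate_plot_parse_column('dates', 'y', 'power',
                                       ['linear', 'squared'],
                                       line_colors=['red', 'green'],
                                       label_pos='upper left')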
# --------------------------------------------------------------------------------
    def timedate_plot_columns(self, x_headers: List[str], y_headers: List[str], labels: List[str],
style_name: str='default',
line_colors: List[str]=['None'], line_weight: np.float32=2.0,
fill_alpha: np.float32=0.7, line_style: str='-', x_label: str='',
y_label: str='', title: str='', label_pos: str='upper right',
x_scale: str='LIN', y_scale: str='LIN', plot_name: str='NULL',
save: bool=False, label_font_size: int=18,
tick_font_size: int=18, title_font_size: int=24,
marker_size: int=35, marker_edge_width: np.float32=0.8,
                              grid: bool=False, grid_style='-', grid_color='grey') -> None:
"""
        :param x_headers: The titles of the dataframe columns containing the x-axis
                          data sets. It is assumed that the x axis is the datetime
                          axis for this plot.
:param y_headers: The title of the dataframe column containing the y-axis
data sets
:param labels: A list of the labels to use for each curve in the legend
:param style_name: The name of the matplotlib style that will be used to
format the plot. Defaulted to 'default'. Possible
styles can be found at :href
`styles<https://matplotlib.org/stable/api/style_api.html>`
:param line_colors: A list of line colors, where each marker color
corresponds to each data set. This parameter has a
                            default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at :href `colors<https://matplotlib.org/stable/gallery/color/named_colors.html>`
:param line_weight: The weight corresponding to the line thickness, defaulted to 2.0
        :param fill_alpha: The density of the marker fill. Defaulted to 0.7
        :param x_label: The x axis label, defaulted to ' '
:param y_label: The y axis label, defaulted to ' '
:param title: The plot title, defaulted to ' '
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param x_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param y_scale: 'LOG' or 'LIN', defaulted to 'LIN'
:param plot_name: The name of the file containing the plot if the plot is to
be saved. Defaulted to 'NULL'
:param save: True if the plot is to be saved, False if the plot is to be
shown and not saved. Defaulted to False
:param label_font_size: The label font size, defaulted to 18
:param tick_font_size: The tick font size, defaulted to 18
:param title_font_size: The title font size, defaulted to 24
:param marker_size: The size of the marker, defaulted to 35
:param marker_edge_width: The thickness of the line outlining
each marker. Defaulted to 0.8
:param grid: True if a grid overlaid on the plot is desired, False if not
:param grid_color: Defaulted to 'grey'
        :param grid_style: Defaulted to '-'
        This method will plot user defined dataframe columns for the x and
        y axis of a 2-d plot as a line plot, where the x axis columns must
        contain datetime data. As an example, consider a dataframe with the
        following columnar data structure.
.. code-block:: python
        > length = 20
        > dates = pd.date_range(start='2021-01-01', periods=length, freq='D')
        > linear = np.linspace(0, length, num=length)
        > squared = linear ** 2.0
        > # Create dataframe
        > dictionary = {'dates': dates, 'linear': linear, 'squared': squared}
        > df = pd.DataFrame(dictionary)
        > # Plot data
        > obj = MatPlotDataFrame(df)
        > x_headers = ['dates', 'dates']
        > y_headers = ['linear', 'squared']
        obj.timedate_plot_columns(x_headers, y_headers, y_headers,
                                  line_colors=['red', 'green'],
                                  label_pos='upper left')
.. image:: line_scatter_test1.eps
:align: center
"""
diff = 0
for i in range(len(x_headers)):
max_date = self.df[x_headers[i]].max()
min_date = self.df[x_headers[i]].min()
delta = (max_date - min_date) / np.timedelta64(1, 'D')
if delta > diff:
diff = delta
# Error checking
if line_colors[0] == 'None':
line_colors = self.colors
if len(line_colors) < len(x_headers):
            msg1 = 'FATAL ERROR: The length of the line color list must be as '
            msg2 = 'large or larger than the size of the x_headers list'
            sys.exit(msg1 + msg2)
if save and plot_name == 'NULL':
warnings.warn('if save is True then plot name cannot be NULL')
if y_scale != 'LOG' and y_scale != 'LIN':
warnings.warn('y_scale must be set to LOG or LIN')
if x_scale != 'LOG' and x_scale != 'LIN':
            warnings.warn('x_scale must be set to LOG or LIN')
# begin plot
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
fig, td_plot = plt.subplots()
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
td_plot.set_xlabel(x_label, fontsize=label_font_size)
td_plot.set_ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
td_plot.set_title(title, fontsize=title_font_size)
if x_scale.upper() == 'LOG':
td_plot.set_xscale('log')
if y_scale.upper() == 'LOG':
td_plot.set_yscale('log')
if diff <= 2:
myfmt = mdates.DateFormatter('%H')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(6))
elif diff <= 15:
myfmt = mdates.DateFormatter('%b-%d')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(6))
elif diff <= 180:
myfmt = mdates.DateFormatter('%b-%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
elif diff <= 2191:
myfmt = mdates.DateFormatter('%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
else:
myfmt = mdates.DateFormatter('%Y')
td_plot.xaxis.set_major_locator(plt.MaxNLocator(5))
td_plot.xaxis.set_major_formatter(myfmt)
for i in range(len(x_headers)):
td_plot.plot(self.df[x_headers[i]], self.df[y_headers[i]],
label=labels[i], linestyle=line_style,
color=line_colors[i], linewidth=line_weight)
plt.legend(loc=label_pos)
if grid:
plt.grid(color=grid_color, linestyle=grid_style)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def histogram_plot_parse_column(self, header: str, parsing_header: str,
column_values: List[str], x_label: str='',
y_label: str='', colors: List[str]=['None'],
edge_colors: List[str]=['None'],
shading: List[float]=['None'], label_pos: str='upper right',
num_bins: int = 50,
                                    tick_font_size: int = 18, label_font_size: int = 18,
style_name: str = 'default', save: bool = False,
plot_name: str = 'NULL', hist_type: str = 'bar',
dens: bool = False, title: str = 'NULL',
title_font_size: int = 24) -> None:
"""
        :param header: A string representing the dataframe column that contains the
                       data to be parsed and plotted
:param parsing_header: A string representing the dataframe header that contains
key phrases that will be used to filter the dataframe
for specific data
:param column_values: The key phrases in the dataframe column described by the
`parsing_header` variable
:param x_label: The title for the x axis. Defaulted to ''
:param y_label: The title for the y axis. Defaulted to ''
:param colors: A list containing the colors that will be used to represent
each plot.
:param edge_colors: A list of edge colors, where each color
corresponds to a data set. This parameter has a
default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at
`colors <https://matplotlib.org/stable/gallery/color/named_colors.html>`_
:param shading: The density of the fill for each plot, defaulted to 0.7
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param num_bins: The number of bins used to represent the histogram. Defaulted to 50
:param tick_font_size: The font size of the plot ticks. Defaulted to 18
:param label_font_size: The font size of plot labels. Defaulted to 18
:param style_name: The plot style, defaulted to 'default'. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
:param save: True if the plot is to be saved, False if the plot is only to be
shown
:param plot_name: The name of the plot, if it is to be saved
:param hist_type: {``bar``, ``barstacked``, ``step``, ``stepfilled``}
See
`histogram <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.hist.html>`_
for more information.
:param dens: If True, the first element of the return tuple will be the counts
normalized to form a probability density, i.e., the area (or integral)
under the histogram will sum to 1
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
:param title_font_size: The font size for the title, defaulted to 24
.. code-block:: python
> np.random.seed(19680801)
> x = np.random.normal(15.0, 3.0, 1000)
> y = np.random.normal(20.0, 3.0, 1000)
> data = [x, y]
> labels = ['one', 'two']
> one = np.repeat('one', len(x))
> two = np.repeat('two', len(x))
> x = np.hstack((x, y))
> y = np.hstack((one, two))
> dictionary = {'data': x, 'type': y}
> df = pd.DataFrame(dictionary)
> obj = MatPlotDataFrame(df)
> obj.histogram_plot_parse_column('data', 'type', labels, x_label='x-axis',
y_label='y-axis', shading=[0.9, 0.4], save=True,
plot_name='hist2.eps')
.. image:: hist2.eps
:align: center
"""
if colors[0] == "None":
colors = self.colors
if edge_colors[0] == 'None':
edge_colors = np.repeat('black', len(column_values))
if shading[0] == "None":
shading = np.repeat(0.7, len(column_values))
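# Split the dataframe into one sub-frame per key phrase so each group
# can be drawn as its own histogram below.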
df_list = [self.df[self.df[parsing_header] == col_val] for
col_val in column_values]
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.15)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
plt.xlabel(x_label, fontsize=label_font_size)
plt.ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
plt.title(title, fontsize=title_font_size)
for i in range(len(column_values)):
plt.hist(df_list[i][header], bins=num_bins, color=colors[i], edgecolor=edge_colors[i],
alpha=shading[i], label=column_values[i], histtype=hist_type, density=dens)
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# --------------------------------------------------------------------------------
def histogram_plot_columns(self, x_headers: List[str], labels: List[str],
x_label: str='',
y_label: str='', colors: List[str]=['None'],
edge_colors: List[str]=['None'],
shading: List[float]=['None'], label_pos: str='upper right',
num_bins: int = 50,
tick_font_size: int = 18, label_font_size: int = 18,
style_name: str = 'default', save: bool = False,
plot_name: str = 'NULL', hist_type: str = 'bar',
dens: bool = False, title: str = 'NULL',
title_font_size: int = 24) -> None:
"""
:param x_headers: A list of strings representing the dataframe columns to be
used for the x axis of a plot
:param labels: A list of labels, each label corresponding to each
histogram
:param x_label: The title for the x axis. Defaulted to ''
:param y_label: The title for the y axis. Defaulted to ''
:param colors: A list containing the colors that will be used to represent
each plot.
:param edge_colors: A list of edge colors, where each color
corresponds to a data set. This parameter has a
default color list that can accommodate 18 different
data sets. The user can override the default colors
with a list of their own. Potential colors can be
found at
`colors <https://matplotlib.org/stable/gallery/color/named_colors.html>`_
:param shading: The density of the fill for each plot, defaulted to 0.7
:param label_pos: The position of the legend in the plot. Defaulted to 'upper right'
:param num_bins: The number of bins used to represent the histogram. Defaulted to 50
:param tick_font_size: The font size of the plot ticks. Defaulted to 18
:param label_font_size: The font size of plot labels. Defaulted to 18
:param style_name: The plot style, defaulted to 'default'. Acceptable styles can be
found at
`matplotlib styles <https://matplotlib.org/3.2.1/gallery/style_sheets/style_sheets_reference.html>`_.
:param save: True if the plot is to be saved, False if the plot is only to be
shown
:param plot_name: The name of the plot, if it is to be saved
:param hist_type: {``bar``, ``barstacked``, ``step``, ``stepfilled``}
See
`histogram <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.hist.html>`_
for more information.
:param dens: If True, the first element of the return tuple will be the counts
normalized to form a probability density, i.e., the area (or integral)
under the histogram will sum to 1
:param title: The title of the plot to incorporate into the header. Defaulted to NULL
:param title_font_size: The font size for the title, defaulted to 24
.. code-block:: python
> np.random.seed(19680801)
> x = np.random.normal(15.0, 3.0, 1000)
> y = np.random.normal(20.0, 3.0, 1000)
> labels = ['one', 'two']
> dictionary = {'one': x, 'two': y}
> df = pd.DataFrame(dictionary)
> obj = MatPlotDataFrame(df)
> obj.histogram_plot_columns(['one', 'two'], labels, x_label='x-axis',
y_label='y-axis', shading=[0.9, 0.4], save=True,
plot_name='hist2.eps')
.. image:: hist2.eps
:align: center
"""
if colors[0] == "None":
colors = self.colors
if edge_colors[0] == 'None':
edge_colors = np.repeat('black', len(labels))
if shading[0] == "None":
shading = np.repeat(0.7, len(labels))
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.15)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use(style_name)
rc('xtick', labelsize=tick_font_size)
rc('ytick', labelsize=tick_font_size)
plt.xlabel(x_label, fontsize=label_font_size)
plt.ylabel(y_label, fontsize=label_font_size)
if title != 'NULL':
plt.title(title, fontsize=title_font_size)
for i in range(len(x_headers)):
plt.hist(self.df[x_headers[i]], bins=num_bins, color=colors[i],
edgecolor=edge_colors[i], alpha=shading[i], label=labels[i],
density=dens)
plt.legend(loc=label_pos)
if not save:
plt.show()
else:
plt.savefig(plot_name)
plt.close()
# ================================================================================
# ================================================================================
# eof
# TODO Create histogram version of plots
# TODO Repeat for Bokeh plots
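# A minimal usage sketch (assumes numpy/pandas are imported at the top of
# this module as np/pd; 'cols.eps' is a hypothetical output name):
#
#     df = pd.DataFrame({'a': np.random.normal(0.0, 1.0, 500),
#                        'b': np.random.normal(2.0, 1.0, 500)})
#     obj = MatPlotDataFrame(df)
#     obj.histogram_plot_columns(['a', 'b'], ['a', 'b'], save=True,
#                                plot_name='cols.eps')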
| 51.469069
| 128
| 0.561298
| 11,193
| 85,696
| 4.159117
| 0.042259
| 0.031276
| 0.015982
| 0.010053
| 0.925246
| 0.918824
| 0.913411
| 0.907632
| 0.902155
| 0.900543
| 0
| 0.012841
| 0.327518
| 85,696
| 1,664
| 129
| 51.5
| 0.794964
| 0.528624
| 0
| 0.802083
| 0
| 0
| 0.118483
| 0
| 0
| 0
| 0
| 0.000601
| 0
| 1
| 0.020833
| false
| 0
| 0.010417
| 0
| 0.047619
| 0.014881
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86679303b4786d18723b25ff8a6bfe30c222b930
| 389
|
py
|
Python
|
src/sage/manifolds/all.py
|
hsm207/sage
|
020bd59ec28717bfab9af44d2231c53da1ff99f1
|
[
"BSL-1.0"
] | 1
|
2021-10-18T01:24:04.000Z
|
2021-10-18T01:24:04.000Z
|
src/sage/manifolds/all.py
|
hsm207/sage
|
020bd59ec28717bfab9af44d2231c53da1ff99f1
|
[
"BSL-1.0"
] | null | null | null |
src/sage/manifolds/all.py
|
hsm207/sage
|
020bd59ec28717bfab9af44d2231c53da1ff99f1
|
[
"BSL-1.0"
] | null | null | null |
from sage.misc.lazy_import import lazy_import
lazy_import('sage.manifolds.manifold', 'Manifold')
lazy_import('sage.manifolds.differentiable.examples.real_line', 'OpenInterval')
lazy_import('sage.manifolds.differentiable.examples.real_line', 'RealLine')
lazy_import('sage.manifolds.differentiable.examples.euclidean', 'EuclideanSpace')
lazy_import('sage.manifolds', 'catalog', 'manifolds')
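# Note: lazy_import defers the real import until first use, so e.g.
# Manifold only loads sage.manifolds.manifold when Manifold(...) is
# first called, keeping Sage startup fast.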
| 55.571429
| 81
| 0.820051
| 46
| 389
| 6.73913
| 0.347826
| 0.225806
| 0.225806
| 0.370968
| 0.487097
| 0.487097
| 0.341935
| 0.341935
| 0
| 0
| 0
| 0
| 0.03856
| 389
| 6
| 82
| 64.833333
| 0.828877
| 0
| 0
| 0
| 0
| 0
| 0.614396
| 0.429306
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
866b45019541e0bc4354b8dbdf17d04c3ec02365
| 200
|
py
|
Python
|
aiflearn/explainers/__init__.py
|
gusrabbit/aif360-learn
|
b14a9b98e96dd2756faf312047e9a50ccc1559fa
|
[
"Apache-2.0"
] | null | null | null |
aiflearn/explainers/__init__.py
|
gusrabbit/aif360-learn
|
b14a9b98e96dd2756faf312047e9a50ccc1559fa
|
[
"Apache-2.0"
] | null | null | null |
aiflearn/explainers/__init__.py
|
gusrabbit/aif360-learn
|
b14a9b98e96dd2756faf312047e9a50ccc1559fa
|
[
"Apache-2.0"
] | null | null | null |
from aiflearn.explainers.explainer import Explainer
from aiflearn.explainers.metric_text_explainer import MetricTextExplainer
from aiflearn.explainers.metric_json_explainer import MetricJSONExplainer
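# These re-exports let callers import the explainer classes straight from
# the package, e.g. (a usage sketch):
#     from aiflearn.explainers import MetricTextExplainer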
| 50
| 73
| 0.91
| 22
| 200
| 8.090909
| 0.454545
| 0.202247
| 0.370787
| 0.314607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06
| 200
| 3
| 74
| 66.666667
| 0.946809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
810df8d57b3177e4e0b257704133cdde592bd50d
| 15,744
|
py
|
Python
|
tests/main/views/test_supplier_questions.py
|
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
|
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
|
[
"MIT"
] | 1
|
2021-05-06T22:37:05.000Z
|
2021-05-06T22:37:05.000Z
|
tests/main/views/test_supplier_questions.py
|
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
|
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
|
[
"MIT"
] | 108
|
2017-06-14T10:48:10.000Z
|
2021-06-11T08:55:25.000Z
|
tests/main/views/test_supplier_questions.py
|
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
|
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
|
[
"MIT"
] | 5
|
2017-06-27T15:13:11.000Z
|
2021-04-10T18:06:29.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from ...helpers import BaseApplicationTest
from dmapiclient import HTTPError
from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub
import mock
from lxml import html
import pytest
class TestClarificationQuestionsPage(BaseApplicationTest):
SIDE_LINKS_XPATH = '//div[@class="column-one-third"]//a'
INSTRUCTION_LINKS_XPATH = '//main[@id="content"]//ul/li/a'
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.supplier_questions.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='live',
lots=[
LotStub(slug='digital-specialists', allows_brief=True).response(),
]
).single_result_response()
self.login_as_buyer()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@staticmethod
def _get_links(document, xpath, text_only=None):
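# Returns link texts only when text_only is truthy; otherwise returns
# (text, href) pairs for every element matched by the given XPath.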
if text_only:
return [e.text_content() for e in document.xpath(xpath)]
return [
(e.text_content(), e.get('href')) for e in document.xpath(xpath)
]
@pytest.mark.parametrize('framework_status', ['live', 'expired'])
def test_show_clarification_questions_page_for_live_brief_with_no_questions(
self, framework_status):
with self.app.app_context():
self.data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status=framework_status,
lots=[
LotStub(slug='digital-specialists', allows_brief=True).response(),
]
).single_result_response()
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="live",
).single_result_response()
brief_json['briefs']['publishedAt'] = "2016-04-02T20:10:00.00000Z"
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
res = self.client.get(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements/digital-specialists/1234/supplier-questions" # noqa
)
assert res.status_code == 200
page_html = res.get_data(as_text=True)
assert "Supplier questions" in page_html
assert "No questions or answers have been published" in page_html
assert "Answer a supplier question" in page_html
@pytest.mark.parametrize('framework_status', ['live', 'expired'])
def test_show_clarification_questions_page_for_live_brief_with_one_question(self, framework_status):
self.data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status=framework_status,
lots=[
LotStub(slug='digital-specialists', allows_brief=True).response(),
]
).single_result_response()
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="live",
clarification_questions=[
{"question": "Why is my question a question?",
"answer": "Because",
"publishedAt": "2016-01-01T00:00:00.000000Z"}
],
).single_result_response()
brief_json['briefs']['publishedAt'] = "2016-04-02T20:10:00.00000Z"
brief_json['briefs']["clarificationQuestionsAreClosed"] = True
self.data_api_client.get_brief.return_value = brief_json
res = self.client.get(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements/digital-specialists/1234/supplier-questions" # noqa
)
assert res.status_code == 200
page_html = res.get_data(as_text=True)
assert "Supplier questions" in page_html
assert "Why is my question a question?" in page_html
assert "Because" in page_html
assert "Answer a supplier question" in page_html
assert "No questions or answers have been published" not in page_html
def test_clarification_questions_page_returns_404_if_not_live_brief(self):
self.data_api_client.get_brief.return_value = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="expired",
clarification_questions=[
{"question": "Why is my question a question?",
"answer": "Because",
"publishedAt": "2016-01-01T00:00:00.000000Z"}
],
).single_result_response()
res = self.client.get(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements/digital-specialists/1234/supplier-questions" # noqa
)
assert res.status_code == 404
def test_clarification_questions_page_returns_404_if_brief_not_correct(self):
self.data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='live',
lots=[
LotStub(slug='digital-specialists', allows_brief=True).response(), # 'Incorrect' lot slug
]
).single_result_response()
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="live",
clarification_questions=[
{"question": "Why is my question a question?",
"answer": "Because",
"publishedAt": "2016-01-01T00:00:00.000000Z"}
]
).single_result_response()
brief_json['briefs']['lotSlug'] = "wrong lot slug"
self.data_api_client.get_brief.return_value = brief_json
res = self.client.get(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements/digital-specialists/1234/supplier-questions" # noqa
)
assert res.status_code == 404
class TestAddBriefClarificationQuestion(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.supplier_questions.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.data_api_client.get_framework.return_value = FrameworkStub(
slug="digital-outcomes-and-specialists-4",
status="live",
lots=[
LotStub(slug="digital-specialists", allows_brief=True).response(),
]
).single_result_response()
self.login_as_buyer()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_show_brief_clarification_question_form_for_live_and_expired_framework(self):
framework_statuses = ['live', 'expired']
for framework_status in framework_statuses:
self.data_api_client.get_framework.return_value = FrameworkStub(
slug="digital-outcomes-and-specialists-4",
status=framework_status,
lots=[
LotStub(slug="digital-specialists", allows_brief=True).response(),
]
).single_result_response()
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="live",
).single_result_response()
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
res = self.client.get(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements"
"/digital-specialists/1234/supplier-questions/answer-question")
assert res.status_code == 200
def test_add_brief_clarification_question_for_live_and_expired_framework(self):
framework_statuses = ['live', 'expired']
for framework_status in framework_statuses:
self.data_api_client.get_framework.return_value = FrameworkStub(
slug="digital-outcomes-and-specialists-4",
status=framework_status,
lots=[
LotStub(slug="digital-specialists", allows_brief=True).response(),
]
).single_result_response()
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="live",
).single_result_response()
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
res = self.client.post(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements"
"/digital-specialists/1234/supplier-questions/answer-question",
data={
"question": "Why?",
"answer": "Because",
})
assert res.status_code == 302
self.data_api_client.add_brief_clarification_question.assert_called_with(
"1234", "Why?", "Because", "buyer@email.com")
# test that the redirect ends up on the right page
assert res.headers['Location'].endswith(
'/buyers/frameworks/digital-outcomes-and-specialists-4/requirements/digital-specialists/1234/supplier-questions' # noqa
) is True
def test_404_if_framework_is_not_live_or_expired(self):
for framework_status in ['coming', 'open', 'pending', 'standstill']:
self.data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status=framework_status,
lots=[
LotStub(slug='digital-specialists', allows_brief=True).response(),
]
).single_result_response()
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
).single_result_response()
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
res = self.client.post(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements"
"/digital-specialists/1234/supplier-questions/answer-question",
data={
"question": "Why?",
"answer": "Because",
})
assert res.status_code == 404
assert not self.data_api_client.add_brief_clarification_question.called
def test_404_if_framework_does_not_allow_brief(self):
self.data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='live',
lots=[
LotStub(slug='digital-specialists', allows_brief=False).response(),
]
).single_result_response()
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
).single_result_response()
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
res = self.client.post(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements"
"/digital-specialists/1234/supplier-questions/answer-question",
data={
"question": "Why?",
"answer": "Because",
})
assert res.status_code == 404
assert not self.data_api_client.add_brief_clarification_question.called
def test_404_if_brief_does_not_belong_to_user(self):
self.data_api_client.get_framework.return_value = FrameworkStub(
slug='digital-outcomes-and-specialists-4',
status='live',
lots=[
LotStub(slug='digital-specialists', allows_brief=True).response(),
]
).single_result_response()
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
user_id=234,
).single_result_response()
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
res = self.client.post(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements"
"/digital-specialists/1234/supplier-questions/answer-question",
data={
"question": "Why?",
"answer": "Because",
})
assert res.status_code == 404
assert not self.data_api_client.add_brief_clarification_question.called
def test_404_if_brief_is_not_live(self):
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="draft",
).single_result_response()
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
res = self.client.post(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements"
"/digital-specialists/1234/supplier-questions/answer-question",
data={
"question": "Why?",
"answer": "Because",
})
assert res.status_code == 404
assert not self.data_api_client.add_brief_clarification_question.called
def test_validation_error(self):
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="live",
).single_result_response()
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
self.data_api_client.add_brief_clarification_question.side_effect = HTTPError(
mock.Mock(status_code=400),
{"question": "answer_required"})
res = self.client.post(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements"
"/digital-specialists/1234/supplier-questions/answer-question",
data={
"question": "Why?",
"answer": "Because",
})
document = html.fromstring(res.get_data(as_text=True))
assert res.status_code == 400
assert len(document.cssselect(".govuk-form-group--error")) == 1, res.get_data(as_text=True)
def test_api_error(self):
brief_json = BriefStub(
framework_slug="digital-outcomes-and-specialists-4",
status="live",
).single_result_response()
brief_json['briefs']["clarificationQuestionsAreClosed"] = False
self.data_api_client.get_brief.return_value = brief_json
self.data_api_client.add_brief_clarification_question.side_effect = HTTPError(
mock.Mock(status_code=500))
res = self.client.post(
"/buyers/frameworks/digital-outcomes-and-specialists-4/requirements"
"/digital-specialists/1234/supplier-questions/answer-question",
data={
"question": "Why?",
"answer": "Because",
})
assert res.status_code == 500
| 43.134247
| 136
| 0.629192
| 1,653
| 15,744
| 5.733212
| 0.109498
| 0.028807
| 0.053498
| 0.066371
| 0.881081
| 0.873483
| 0.863775
| 0.859555
| 0.841406
| 0.835285
| 0
| 0.021953
| 0.265117
| 15,744
| 364
| 137
| 43.252747
| 0.797148
| 0.00686
| 0
| 0.766026
| 0
| 0.016026
| 0.258975
| 0.18788
| 0
| 0
| 0
| 0
| 0.086538
| 1
| 0.054487
| false
| 0
| 0.022436
| 0
| 0.096154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8121bfa7a1f593c8b37e670b8054bbed322ae0bf
| 22,955
|
py
|
Python
|
ververica_api_sdk/api/secret_value_resource_api.py
|
justlikemikezz/ververica-api-sdk
|
0eee284b4433f74b35fd2f41d149e619624aaed3
|
[
"RSA-MD"
] | null | null | null |
ververica_api_sdk/api/secret_value_resource_api.py
|
justlikemikezz/ververica-api-sdk
|
0eee284b4433f74b35fd2f41d149e619624aaed3
|
[
"RSA-MD"
] | null | null | null |
ververica_api_sdk/api/secret_value_resource_api.py
|
justlikemikezz/ververica-api-sdk
|
0eee284b4433f74b35fd2f41d149e619624aaed3
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
Application Manager API
Application Manager APIs to control Apache Flink jobs # noqa: E501
OpenAPI spec version: 2.0.1
Contact: platform@ververica.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ververica_api_sdk.api_client import ApiClient
class SecretValueResourceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_secret_value_using_post(self, namespace, secret_value, **kwargs): # noqa: E501
"""Create a secret value # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_secret_value_using_post(namespace, secret_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: namespace (required)
:param SecretValue secret_value: secretValue (required)
:return: SecretValue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_secret_value_using_post_with_http_info(namespace, secret_value, **kwargs) # noqa: E501
else:
(data) = self.create_secret_value_using_post_with_http_info(namespace, secret_value, **kwargs) # noqa: E501
return data
def create_secret_value_using_post_with_http_info(self, namespace, secret_value, **kwargs): # noqa: E501
"""Create a secret value # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_secret_value_using_post_with_http_info(namespace, secret_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: namespace (required)
:param SecretValue secret_value: secretValue (required)
:return: SecretValue
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'secret_value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_secret_value_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_secret_value_using_post`") # noqa: E501
# verify the required parameter 'secret_value' is set
if ('secret_value' not in params or
params['secret_value'] is None):
raise ValueError("Missing the required parameter `secret_value` when calling `create_secret_value_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'secret_value' in params:
body_params = params['secret_value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/namespaces/{namespace}/secret-values', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SecretValue', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_secret_value_using_delete(self, name, namespace, **kwargs): # noqa: E501
"""Delete a secret value # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_secret_value_using_delete(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name (required)
:param str namespace: namespace (required)
:return: SecretValue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_secret_value_using_delete_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_secret_value_using_delete_with_http_info(name, namespace, **kwargs) # noqa: E501
return data
def delete_secret_value_using_delete_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""Delete a secret value # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_secret_value_using_delete_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name (required)
:param str namespace: namespace (required)
:return: SecretValue
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_secret_value_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_secret_value_using_delete`") # noqa: E501
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_secret_value_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
if 'namespace' in params:
path_params['namespace'] = params['namespace'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/namespaces/{namespace}/secret-values/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SecretValue', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_secret_value_using_get(self, name, namespace, **kwargs): # noqa: E501
"""Get a secret value by name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_secret_value_using_get(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name (required)
:param str namespace: namespace (required)
:return: SecretValue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_secret_value_using_get_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.get_secret_value_using_get_with_http_info(name, namespace, **kwargs) # noqa: E501
return data
def get_secret_value_using_get_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""Get a secret value by name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_secret_value_using_get_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name (required)
:param str namespace: namespace (required)
:return: SecretValue
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_secret_value_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_secret_value_using_get`") # noqa: E501
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `get_secret_value_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
if 'namespace' in params:
path_params['namespace'] = params['namespace'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/namespaces/{namespace}/secret-values/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SecretValue', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_secret_values_using_get(self, namespace, **kwargs): # noqa: E501
"""List all secrets values # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_secret_values_using_get(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: namespace (required)
:return: ResourceListSecretValue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_secret_values_using_get_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.get_secret_values_using_get_with_http_info(namespace, **kwargs) # noqa: E501
return data
def get_secret_values_using_get_with_http_info(self, namespace, **kwargs): # noqa: E501
"""List all secrets values # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_secret_values_using_get_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: namespace (required)
:return: ResourceListSecretValue
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_secret_values_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `get_secret_values_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/namespaces/{namespace}/secret-values', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceListSecretValue', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_secret_value_using_patch(self, body, name, namespace, **kwargs): # noqa: E501
"""Update a secret value # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_secret_value_using_patch(body, name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ComDataartisansAppmanagerApiV1EntitySecretvalueSecretValue body: (required)
:param str name: name (required)
:param str namespace: namespace (required)
:return: SecretValue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_secret_value_using_patch_with_http_info(body, name, namespace, **kwargs) # noqa: E501
else:
(data) = self.update_secret_value_using_patch_with_http_info(body, name, namespace, **kwargs) # noqa: E501
return data
def update_secret_value_using_patch_with_http_info(self, body, name, namespace, **kwargs): # noqa: E501
"""Update a secret value # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_secret_value_using_patch_with_http_info(body, name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ComDataartisansAppmanagerApiV1EntitySecretvalueSecretValue body: (required)
:param str name: name (required)
:param str namespace: namespace (required)
:return: SecretValue
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'name', 'namespace'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_secret_value_using_patch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_secret_value_using_patch`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `update_secret_value_using_patch`") # noqa: E501
# verify the required parameter 'namespace' is set
if ('namespace' not in params or
params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `update_secret_value_using_patch`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
if 'namespace' in params:
path_params['namespace'] = params['namespace'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/namespaces/{namespace}/secret-values/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SecretValue', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
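# A minimal usage sketch (hypothetical namespace value; relies on the
# default ApiClient that __init__ constructs when none is supplied):
#
#     api = SecretValueResourceApi()
#     secrets = api.get_secret_values_using_get('default')
#     # asynchronous variant, as documented above:
#     thread = api.get_secret_values_using_get('default', async_req=True)
#     secrets = thread.get()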
| 40.991071
| 137
| 0.622392
| 2,609
| 22,955
| 5.220391
| 0.063626
| 0.046402
| 0.043465
| 0.026432
| 0.954552
| 0.939427
| 0.927827
| 0.922247
| 0.903084
| 0.883113
| 0
| 0.015444
| 0.286343
| 22,955
| 559
| 138
| 41.064401
| 0.815957
| 0.30538
| 0
| 0.782895
| 0
| 0
| 0.219389
| 0.071044
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036184
| false
| 0
| 0.013158
| 0
| 0.101974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8142aabe95c7abbe5d41840421797be452f485aa
| 608
|
py
|
Python
|
Test/test.py
|
induraj2020/DeepEnsemble
|
e0a459cc5741f376cb26c43538cde74a8c6d3b22
|
[
"MIT"
] | 1
|
2021-08-02T12:22:25.000Z
|
2021-08-02T12:22:25.000Z
|
Test/test.py
|
induraj2020/DeepEnsemble
|
e0a459cc5741f376cb26c43538cde74a8c6d3b22
|
[
"MIT"
] | null | null | null |
Test/test.py
|
induraj2020/DeepEnsemble
|
e0a459cc5741f376cb26c43538cde74a8c6d3b22
|
[
"MIT"
] | null | null | null |
import numpy as np
if __name__ == "__main__":
xx = np.array([[0.1, 0.2, 0.3, 0.4],
[0.5, 0.6, 0.7, 0.8],
[0.8, 0.6, 0.3, 0.4],
[0.8, 0.6, 0.7, 0.8],
[0.8, 0.8, 0.8, 0.4],
[0.1, 0.6, 0.7, 0.8],
[0.1, 0.2, 0.3, 0.4],
[0.5, 0.6, 0.7, 0.8],
[0.8, 0.6, 0.3, 0.4],
[0.8, 0.6, 0.7, 0.8],
[0.8, 0.8, 0.8, 0.4],
[0.1, 0.6, 0.7, 0.8]
])
y_actual = np.array([1,0,0,1,0,1,1,0,0,1,0,1])
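# Shapes: xx is a 12x4 array of scores and y_actual the 12 matching
# binary labels; in an ensemble setting (assumed here), a simple
# average over the four columns would be xx.mean(axis=1).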
| 32
| 50
| 0.271382
| 122
| 608
| 1.278689
| 0.163934
| 0.205128
| 0.288462
| 0.205128
| 0.692308
| 0.692308
| 0.615385
| 0.615385
| 0.615385
| 0.615385
| 0
| 0.346154
| 0.486842
| 608
| 18
| 51
| 33.777778
| 0.153846
| 0
| 0
| 0.5
| 0
| 0
| 0.013201
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d4ad6d874e599704d15645ad66d6a46ceed91670
| 8,379
|
py
|
Python
|
resources/tasks.py
|
axonepro/sdk-ooti
|
146ba758f571352d02daa56349e8b3affd8ca5a9
|
[
"Unlicense"
] | 1
|
2021-03-13T16:04:54.000Z
|
2021-03-13T16:04:54.000Z
|
resources/tasks.py
|
axonepro/sdk-ooti
|
146ba758f571352d02daa56349e8b3affd8ca5a9
|
[
"Unlicense"
] | 7
|
2021-07-21T12:42:39.000Z
|
2022-01-06T10:34:04.000Z
|
resources/tasks.py
|
axonepro/sdk-ooti
|
146ba758f571352d02daa56349e8b3affd8ca5a9
|
[
"Unlicense"
] | 2
|
2021-06-22T08:10:48.000Z
|
2021-09-01T09:16:41.000Z
|
import requests
import json
from .helper import Helper
class Tasks(Helper):
def __init__(self, base_url, org_pk, teams_pk, access_token, _csrf_token, headers, pagination):
super().__init__(base_url, org_pk, teams_pk, access_token, _csrf_token, headers, pagination)
def empty_tasks_trash(self, project_id):
""" Set delete all not-completed archived tasks in project """
route = 'v1/tasks/empty-trash/{0}/'.format(project_id)
response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def get_task_labels_list(self, page=1):
""" Get the list of tasks labels """
route = 'v1/tasks/label/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response, True)
def create_task_label(self, data):
""" Create a new task label
Keyword arguments:
data -- data of the new label to be created:
{
"creator": orguser_pk,
"team": team_pk,
"title": "label title",
"description": "new task label"
}
"""
route = 'v1/tasks/label/list/{0}/'.format(self.org_pk)
response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def get_task_label_details(self, label_pk):
""" Get the task label details
Keyword arguments:
label_pk -- pk of the task label
"""
route = 'v1/tasks/label/{0}/'.format(label_pk)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def update_task_label_details(self, label_pk, data):
""" Update the task label details
Keyword arguments:
label_pk -- pk of the task label
data -- content of the update:
{
"creator": orguser_pk,
"team": team_pk,
"title": "new title",
"description": "description updated"
}
"""
route = 'v1/tasks/label/{0}/'.format(label_pk)
response = self.process_request(requests, 'PATCH', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def delete_task_label(self, label_pk):
""" Delete the task label details
Keyword arguments:
label_pk -- pk of the task label
"""
route = 'v1/tasks/label/{0}/'.format(label_pk)
response = self.process_request(requests, 'DELETE', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def get_tasks_list(self, page=1):
""" Get the tasks list """
route = 'v1/tasks/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response, True)
def create_task(self, data):
""" Create a new task
Keyword arguments:
data -- data of the new task to be created:
{
"creator": orguser_pk,
"created_at": "string",
"labels": [
label_pk,
...
],
"title": "string",
"due_date": "string",
"description": "string"
}
"""
route = 'v1/tasks/list/{0}/'.format(self.org_pk)
response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def get_tasks_lists_list(self, page=1):
""" Get the list of tasks list """
route = 'v1/tasks/lists/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response, True)
def create_tasks_list(self, data):
""" Create a new list of tasks
Keyword arguments:
data -- data of the new list of tasks to be created:
{
"author": orguser_pk,
"title": "new list",
"tasks": [
task_pk,
...
],
"followers": [
orguser_pk,
...
]
}
"""
route = 'v1/tasks/lists/list/{0}/'.format(self.org_pk)
response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def get_tasks_list_details(self, list_pk):
""" Get the list of tasks details
Keyword arguments:
list_pk -- the pk of list of tasks
"""
route = 'v1/tasks/lists/{0}/'.format(list_pk)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def update_tasks_list_details(self, list_pk, data):
""" Update the list of tasks details
Keyword arguments:
list_pk -- the pk of list of tasks
data -- content of the update:
{
"author": orguser_pk,
"title": "new list",
"tasks": [
task_pk,
...
],
"followers": [
orguser_pk,
...
]
}
"""
route = 'v1/tasks/lists/{0}/'.format(list_pk)
response = self.process_request(requests, 'PATCH', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def delete_tasks_list(self, list_pk):
""" Delete the list of tasks
Keyword arguments:
list_pk -- the pk of list of tasks
"""
route = 'v1/tasks/lists/{0}/'.format(list_pk)
response = self.process_request(requests, 'DELETE', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def log_tasks(self):
""" Set all tasks to is_logged True """
route = 'v1/tasks/log-tasks/{0}/'.format(self.org_pk)
response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def get_tasks_timeline(self):
route = 'v1/tasks/timeline/{0}/'.format(self.org_pk)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def get_task_details(self, pk):
""" Get task details
Keyword arguments:
pk -- the pk of the task
"""
route = 'v1/tasks/{0}/'.format(pk)
response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None)
return self.process_response(response)
def update_task_details(self, pk, data):
""" Update task details
Keyword arguments:
pk -- the pk of the task
data -- content of the update:
{
"creator": orguser_pk,
"created_at": "string",
"estimate": 0,
"is_logged": true,
"labels": [
"string"
],
"title": "string",
"due_date": "string",
"completed_at": "string",
"description": "string",
"is_completed": true
}
"""
route = 'v1/tasks/{0}/'.format(pk)
response = self.process_request(requests, 'PATCH', self.base_url, route, self.headers, None, json.dumps(data))
return self.process_response(response)
def delete_task(self, pk):
""" Delete task
Keyword arguments:
pk -- the pk of the task
"""
route = 'v1/tasks/{0}/'.format(pk)
response = self.process_request(requests, 'DELETE', self.base_url, route, self.headers, None, None)
return self.process_response(response)
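# A minimal usage sketch (hypothetical credentials and pks; Tasks inherits
# its request plumbing from Helper as shown above):
#
#     tasks = Tasks(base_url, org_pk, teams_pk, access_token,
#                   _csrf_token, headers, pagination)
#     labels = tasks.get_task_labels_list(page=1)
#     tasks.create_task({'creator': orguser_pk, 'title': 'Review specs',
#                        'due_date': '2022-01-31', 'description': ''})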
| 34.060976
| 118
| 0.583602
| 1,004
| 8,379
| 4.706175
| 0.085657
| 0.08381
| 0.044233
| 0.099048
| 0.861587
| 0.829841
| 0.771429
| 0.740741
| 0.72381
| 0.709418
| 0
| 0.007761
| 0.292636
| 8,379
| 246
| 119
| 34.060976
| 0.789438
| 0.269483
| 0
| 0.576923
| 0
| 0
| 0.09564
| 0.048588
| 0
| 0
| 0
| 0
| 0
| 1
| 0.24359
| false
| 0
| 0.038462
| 0
| 0.525641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
be06134ed57654f8f44cb50c8e9bd5388a95f7bf
| 50,374
|
py
|
Python
|
sdks/python/http_client/v1/polyaxon_sdk/api/tags_v1_api.py
|
polyaxon/polyaxon
|
a835f2872a63f6cf5c27d2dd1125ad7c18eb849a
|
[
"Apache-2.0"
] | 3,200
|
2017-05-09T11:35:31.000Z
|
2022-03-28T05:43:22.000Z
|
sdks/python/http_client/v1/polyaxon_sdk/api/tags_v1_api.py
|
polyaxon/polyaxon
|
a835f2872a63f6cf5c27d2dd1125ad7c18eb849a
|
[
"Apache-2.0"
] | 1,324
|
2017-06-29T07:21:27.000Z
|
2022-03-27T12:41:10.000Z
|
sdks/python/http_client/v1/polyaxon_sdk/api/tags_v1_api.py
|
polyaxon/polyaxon
|
a835f2872a63f6cf5c27d2dd1125ad7c18eb849a
|
[
"Apache-2.0"
] | 341
|
2017-01-10T23:06:53.000Z
|
2022-03-10T08:15:18.000Z
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.11.3
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class TagsV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_tag(self, owner, body, **kwargs): # noqa: E501
"""Create tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_tag(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Tag body: Tag body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_tag_with_http_info(owner, body, **kwargs) # noqa: E501
def create_tag_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Create tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_tag_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Tag body: Tag body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `create_tag`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Tag', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_tag(self, owner, name, **kwargs): # noqa: E501
"""Delete tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_tag(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str name: Component under namespace (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_tag_with_http_info(owner, name, **kwargs) # noqa: E501
def delete_tag_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Delete tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_tag_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str name: Component under namespace (required)
        :param _return_http_data_only: if True, return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `delete_tag`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tag(self, owner, name, **kwargs): # noqa: E501
"""Get tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str name: Component under namespace (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_tag_with_http_info(owner, name, **kwargs) # noqa: E501
def get_tag_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
        :param str name: Component under namespace (required)
        :param _return_http_data_only: if True, return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_tag`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Tag', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_tags(self, owner, **kwargs): # noqa: E501
"""List tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tags(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: V1ListTagsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_tags_with_http_info(owner, **kwargs) # noqa: E501
def list_tags_with_http_info(self, owner, **kwargs): # noqa: E501
"""List tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tags_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
        :param _return_http_data_only: if True, return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: tuple(V1ListTagsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'offset',
'limit',
'sort',
'query',
'bookmarks',
'pins',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `list_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'bookmarks' in local_var_params and local_var_params['bookmarks'] is not None: # noqa: E501
query_params.append(('bookmarks', local_var_params['bookmarks'])) # noqa: E501
if 'pins' in local_var_params and local_var_params['pins'] is not None: # noqa: E501
query_params.append(('pins', local_var_params['pins'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListTagsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def load_tags(self, owner, **kwargs): # noqa: E501
"""Load tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.load_tags(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: V1LoadTagsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.load_tags_with_http_info(owner, **kwargs) # noqa: E501
def load_tags_with_http_info(self, owner, **kwargs): # noqa: E501
"""Load tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.load_tags_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
        :param _return_http_data_only: if True, return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: tuple(V1LoadTagsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'offset',
'limit',
'sort',
'query',
'bookmarks',
'pins',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method load_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `load_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'bookmarks' in local_var_params and local_var_params['bookmarks'] is not None: # noqa: E501
query_params.append(('bookmarks', local_var_params['bookmarks'])) # noqa: E501
if 'pins' in local_var_params and local_var_params['pins'] is not None: # noqa: E501
query_params.append(('pins', local_var_params['pins'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/load', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1LoadTagsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_tag(self, owner, tag_name, body, **kwargs): # noqa: E501
"""Patch tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_tag(owner, tag_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str tag_name: Tag name (required)
:param V1Tag body: Tag body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_tag_with_http_info(owner, tag_name, body, **kwargs) # noqa: E501
def patch_tag_with_http_info(self, owner, tag_name, body, **kwargs): # noqa: E501
"""Patch tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_tag_with_http_info(owner, tag_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str tag_name: Tag name (required)
:param V1Tag body: Tag body (required)
        :param _return_http_data_only: if True, return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'tag_name',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `patch_tag`") # noqa: E501
# verify the required parameter 'tag_name' is set
if self.api_client.client_side_validation and ('tag_name' not in local_var_params or # noqa: E501
local_var_params['tag_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `tag_name` when calling `patch_tag`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'tag_name' in local_var_params:
path_params['tag.name'] = local_var_params['tag_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/{tag.name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Tag', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sync_tags(self, owner, body, **kwargs): # noqa: E501
"""Sync tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sync_tags(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1EntitiesTags body: Data (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sync_tags_with_http_info(owner, body, **kwargs) # noqa: E501
def sync_tags_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Sync tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sync_tags_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1EntitiesTags body: Data (required)
        :param _return_http_data_only: if True, return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sync_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `sync_tags`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `sync_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/sync', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def update_tag(self, owner, tag_name, body, **kwargs): # noqa: E501
"""Update tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_tag(owner, tag_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str tag_name: Tag name (required)
:param V1Tag body: Tag body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_tag_with_http_info(owner, tag_name, body, **kwargs) # noqa: E501
def update_tag_with_http_info(self, owner, tag_name, body, **kwargs): # noqa: E501
"""Update tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_tag_with_http_info(owner, tag_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str tag_name: Tag name (required)
:param V1Tag body: Tag body (required)
        :param _return_http_data_only: if True, return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'tag_name',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `update_tag`") # noqa: E501
# verify the required parameter 'tag_name' is set
if self.api_client.client_side_validation and ('tag_name' not in local_var_params or # noqa: E501
local_var_params['tag_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `tag_name` when calling `update_tag`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `update_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'tag_name' in local_var_params:
path_params['tag.name'] = local_var_params['tag_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/{tag.name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Tag', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
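# Usage sketch (illustrative only, not part of the generated client): shows the
# sync/async calling patterns and the timeout semantics described in the
# docstrings above. The host, owner, tag name, and token values are placeholder
# assumptions.
if __name__ == "__main__":
    import polyaxon_sdk

    configuration = polyaxon_sdk.Configuration()
    configuration.host = "https://cloud.polyaxon.com"  # assumed endpoint
    configuration.api_key["ApiKey"] = "my-token"  # placeholder credential
    api = TagsV1Api(polyaxon_sdk.ApiClient(configuration))

    # Synchronous call; _request_timeout may be a single number (total request
    # timeout) or a (connection, read) tuple.
    tags = api.list_tags("my-org", limit=10, _request_timeout=(3.05, 27))

    # Asynchronous call: returns a thread-like handle whose .get() blocks
    # until the response is available.
    thread = api.get_tag("my-org", "baseline", async_req=True)
    tag = thread.get()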
| 45.016979
| 116
| 0.583019
| 5,720
| 50,374
| 4.908392
| 0.044406
| 0.054709
| 0.087762
| 0.027354
| 0.957686
| 0.956333
| 0.951133
| 0.949993
| 0.949993
| 0.941979
| 0
| 0.017831
| 0.337595
| 50,374
| 1,118
| 117
| 45.057245
| 0.823573
| 0.423234
| 0
| 0.797814
| 0
| 0
| 0.166718
| 0.029753
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030965
| false
| 0
| 0.009107
| 0
| 0.071038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be2bebae866fad9fcb8324bc2c8872385bb37012
| 17,903
|
py
|
Python
|
mesh_tensorflow/transformer/vocab_embeddings_test.py
|
bmaier96/mesh
|
c2142a3b4b5f5eaf37a926d30525d2cf8334c65b
|
[
"Apache-2.0"
] | null | null | null |
mesh_tensorflow/transformer/vocab_embeddings_test.py
|
bmaier96/mesh
|
c2142a3b4b5f5eaf37a926d30525d2cf8334c65b
|
[
"Apache-2.0"
] | 1
|
2021-02-24T00:49:53.000Z
|
2021-02-24T00:49:53.000Z
|
mesh_tensorflow/transformer/vocab_embeddings_test.py
|
isabella232/mesh-1
|
3e8c165ef229f20a6c5a28561857c0129ab85368
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for mesh_tensorflow.transformer.vocab_embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import vocab_embeddings
import mock
import numpy as np
import scipy.special  # softmax, used in the expected-logits computations below
import tensorflow.compat.v1 as tf
def initialize_by_shape(shape_to_value):
"""Create an initializer with values specified by tensor shape."""
def initialize(shape, dtype):
shape = tuple(shape)
if shape not in shape_to_value:
raise ValueError(
'Shape {} not found in shape to value map.'.format(shape))
return tf.reshape(
tf.constant(shape_to_value[tuple(shape)], dtype=dtype), shape)
return initialize
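# Illustrative note (added): a doctest-style sketch of the helper above; the
# shapes and values here are arbitrary examples, not fixtures from this file.
#
#   init = initialize_by_shape({(2, 2): [[0, 1], [2, 0]]})
#   init((2, 2), tf.float32)  # -> tf.Tensor [[0., 1.], [2., 0.]]
#   init((3, 3), tf.float32)  # -> raises ValueError: shape not in the map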
class FactorizedVocabEmbeddingTest(tf.test.TestCase):
def setUp(self):
super(FactorizedVocabEmbeddingTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 4
vocab_size = 3
model_size = 2
inner_dimension_size = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 1], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
def initialize(shape, dtype):
return tf.reshape(1 + tf.range(np.prod(shape), dtype=dtype), shape)
self.initializer_mock.side_effect = initialize
vocab_embedding = vocab_embeddings.FactorizedVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
inner_dimension_size=inner_dimension_size)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, [[1, 2], [2, 4], [3, 6], [2, 4]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 4
vocab_size = 3
model_size = 2
inner_dimension_size = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
def initialize(shape, dtype):
return tf.reshape(1 + tf.range(np.prod(shape), dtype=dtype), shape)
self.initializer_mock.side_effect = initialize
vocab_embedding = vocab_embeddings.FactorizedVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
inner_dimension_size=inner_dimension_size)
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_logits])[0]
self.assertAllClose(
actual, model_size**-0.5 *
np.array([[1, 2, 3], [2, 4, 6], [3, 6, 9], [4, 8, 12]]))
class AdaptiveVocabEmbeddingTest(tf.test.TestCase):
def setUp(self):
super(AdaptiveVocabEmbeddingTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_constructor_tokenCountsDontSumToVocabSize_raisesValueError(self):
vocab_dim = mtf.Dimension('vocab', 5)
model_dim = mtf.Dimension('model', 2)
with self.assertRaises(ValueError):
vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 6
vocab_size = 5
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 3, 4, 0], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(2, 2): [[0, 1], [2, 0]],
(3, 1): [[1], [2], [3]],
(1, 2): [[1], [2]],
})
vocab_embedding = vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 2,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual,
[[0, 1], [2, 0], [1, 2], [2, 4], [3, 6], [0, 1]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 4
vocab_size = 5
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(2, 2): [[0, 1], [2, 0]],
(3, 1): [[1], [2], [3]],
(1, 2): [[1], [2]],
})
vocab_embedding = vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 2,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_logits])[0]
self.assertAllClose(
actual,
model_size**-0.5 * np.array([[0, 2, 1, 2, 3], [1, 0, 2, 4, 6],
[1, 2, 3, 6, 9], [1, 4, 4, 8, 12]]))
class MixtureOfSoftmaxesTest(tf.test.TestCase):
def setUp(self):
super(MixtureOfSoftmaxesTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 4
vocab_size = 4
model_size = 3
num_softmaxes = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 3], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(4, 3): [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 2]],
# Mixture weights.
(1, 3): [[1, 0, 0]],
# Context weights
(1, 3, 3): [[[1, 0, 0], [0, 1, 0], [0, 0, 1]],],
})
vocab_embedding = vocab_embeddings.MixtureOfSoftmaxes(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
num_softmaxes=num_softmaxes)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 2]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 1
vocab_size = 4
model_size = 3
num_softmaxes = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant(
np.array([[1.0, 1.0, 2.0]]) / model_size**-0.5, dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(4, 3): [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1]],
# Mixture weights.
(2, 3): [[1, 0, 0], [0, 1, 1]],
# Context weights
(2, 3, 3): [
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, 1], [0, 1, 0], [1, 0, 0]],
],
})
vocab_embedding = vocab_embeddings.MixtureOfSoftmaxes(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
num_softmaxes=num_softmaxes)
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual, = self.evaluate([actual_logits])
expected_priors = scipy.special.softmax([1, 3])
expected_probs_1 = scipy.special.softmax(np.tanh([1, 1, 2, 2]))
expected_probs_2 = scipy.special.softmax(np.tanh([2, 1, 1, 1]))
expected_probs = (
expected_priors[0] * expected_probs_1 +
expected_priors[1] * expected_probs_2)
expected_logits = np.log(expected_probs)
self.assertAllClose(actual, [expected_logits])
class MixtapeTest(tf.test.TestCase):
def setUp(self):
super(MixtapeTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 5
vocab_size = 5
model_size = 2
gate_embedding_size = 1
frequent_token_fraction = 0.4
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
context = mock.MagicMock()
context.train = False
ids = tf.constant([0, 1, 2, 3, 4], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(5, 2): list(range(10)),
# Context weights.
(4, 2, 2): list(range(16)),
# Prior weights.
(3, 1, 2): list(range(6)),
# Prior vocab vector.
(2, 1): list(range(2)),
# Prior gates vector.
(3, 2): list(range(6)),
# Prior bias.
(2, 3): list(range(6)),
})
vocab_embedding = vocab_embeddings.Mixtape(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
gate_embedding_size=gate_embedding_size,
frequent_token_fraction=frequent_token_fraction)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, np.reshape(list(range(10)), (5, 2)))
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 1
vocab_size = 5
model_size = 2
gate_embedding_size = 1
frequent_token_fraction = 0.4
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
context = mock.MagicMock()
context.train = False
embeddings = tf.constant(
np.array([[1.0, 2.0]]) / model_size**-0.5, dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(5, 2): list(range(10)),
# Context weights.
(4, 2, 2): [
[[1, 0], [0, 1]],
[[0, 1], [1, 0]],
[[1, 0], [0, 0]],
[[0, 0], [0, 1]],
],
# Prior weights.
(3, 1, 2): [
[[1, 0]],
[[0, 1]],
[[1, 1]],
],
# Prior vocab vector.
(2, 1): [[1], [1]],
# Prior gates vector.
(3, 2): [
[1, 0],
[0, 1],
[1, 1],
],
# Prior bias.
(2, 3): [[1, 2, 3], [3, 4, 5]],
})
vocab_embedding = vocab_embeddings.Mixtape(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
gate_embedding_size=gate_embedding_size,
frequent_token_fraction=frequent_token_fraction,
noise_std_dev=0.0)
mtf_logits = vocab_embedding.hidden_to_logits(
mtf_embeddings, context=context)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual, = self.evaluate([actual_logits])
self.assertAllClose(actual,
[[0.905462, 4.390559, 6.575162, 9.513036, 12.450909]])
if __name__ == '__main__':
tf.test.main()
| 33.153704
| 80
| 0.649724
| 2,298
| 17,903
| 4.813751
| 0.096171
| 0.007051
| 0.035256
| 0.004339
| 0.836377
| 0.815133
| 0.812873
| 0.798409
| 0.790363
| 0.782679
| 0
| 0.031964
| 0.220633
| 17,903
| 539
| 81
| 33.215213
| 0.76084
| 0.057141
| 0
| 0.7675
| 0
| 0
| 0.032666
| 0.005939
| 0
| 0
| 0
| 0
| 0.0225
| 1
| 0.0425
| false
| 0
| 0.0425
| 0.005
| 0.105
| 0.0025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
077f076797479d732d8e2ee7dd1132370a47b6bd
| 131
|
py
|
Python
|
0/actor.py
|
JacobFV/Computatrum
|
6b9c324f4e0e73e8d7af79bb7785d0e86d26bc31
|
[
"MIT"
] | null | null | null |
0/actor.py
|
JacobFV/Computatrum
|
6b9c324f4e0e73e8d7af79bb7785d0e86d26bc31
|
[
"MIT"
] | null | null | null |
0/actor.py
|
JacobFV/Computatrum
|
6b9c324f4e0e73e8d7af79bb7785d0e86d26bc31
|
[
"MIT"
] | null | null | null |
class actor:
    def action_vector_length(self): pass
    def perform_action(self, action): pass
    def log_action(self, action): pass
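# Illustrative sketch (an assumption, not part of the original file): a minimal
# concrete subclass showing how the interface above could be used.
class EchoActor(actor):
    def action_vector_length(self):
        return 1  # a single scalar action

    def perform_action(self, action):
        print("performing:", action)

    def log_action(self, action):
        print("logged:", action)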
| 32.75
| 42
| 0.725191
| 19
| 131
| 4.789474
| 0.526316
| 0.153846
| 0.351648
| 0.43956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183206
| 131
| 4
| 43
| 32.75
| 0.850467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.75
| false
| 0.75
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
078895e228a18403fab71d0a6710b77c6b746ff1
| 1,555
|
py
|
Python
|
test_pocket.py
|
JackMaguire/RobotsEnv
|
9e43a9d4e202798e9104e681a7d0d6e41c75d163
|
[
"MIT"
] | null | null | null |
test_pocket.py
|
JackMaguire/RobotsEnv
|
9e43a9d4e202798e9104e681a7d0d6e41c75d163
|
[
"MIT"
] | null | null | null |
test_pocket.py
|
JackMaguire/RobotsEnv
|
9e43a9d4e202798e9104e681a7d0d6e41c75d163
|
[
"MIT"
] | null | null | null |
import robots_core
from robots_core.pocket import *
sr = "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000101002010000000010000000000030000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000100000000000000000000000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
b = robots_core.Board( sr )
#posts = find_cardinal_posts( b )
p = create_pocket( b )
print( p.diagonal_offsets[ DiagonalQuadrant.UP_LEFT ] )
| 129.583333
| 1,357
| 0.96463
| 31
| 1,555
| 48.129032
| 0.645161
| 0.020107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.886991
| 0.021222
| 1,555
| 11
| 1,358
| 141.363636
| 0.093298
| 0.020579
| 0
| 0
| 0
| 0
| 0.886991
| 0.886991
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.166667
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
078f211489fbd36f8b9da27995af2d970efb2139
| 11,285
|
py
|
Python
|
tests/checks/mock/test_kubernetes.py
|
kevinmckinley/dd-agent
|
bbc376da5b2a7b0419125a9da002eab3e80dc539
|
[
"BSD-3-Clause"
] | null | null | null |
tests/checks/mock/test_kubernetes.py
|
kevinmckinley/dd-agent
|
bbc376da5b2a7b0419125a9da002eab3e80dc539
|
[
"BSD-3-Clause"
] | null | null | null |
tests/checks/mock/test_kubernetes.py
|
kevinmckinley/dd-agent
|
bbc376da5b2a7b0419125a9da002eab3e80dc539
|
[
"BSD-3-Clause"
] | null | null | null |
# stdlib
import mock
# 3p
import simplejson as json
# project
from tests.checks.common import AgentCheckTest, Fixtures
from checks import AgentCheck
CPU = "CPU"
MEM = "MEM"
FS = "fs"
NET = "net"
NET_ERRORS = "net_errors"
DISK = "disk"
DISK_USAGE = "disk_usage"
PODS = "pods"
METRICS = [
('kubernetes.memory.usage', MEM),
('kubernetes.filesystem.usage', FS),
('kubernetes.filesystem.usage_pct', FS),
('kubernetes.cpu.usage.total', CPU),
('kubernetes.network.tx_bytes', NET),
('kubernetes.network.rx_bytes', NET),
('kubernetes.network_errors', NET_ERRORS),
('kubernetes.diskio.io_service_bytes.stats.total', DISK),
('kubernetes.filesystem.usage_pct', DISK_USAGE),
('kubernetes.filesystem.usage', DISK_USAGE),
('kubernetes.pods.running', PODS),
]
class TestKubernetes(AgentCheckTest):
CHECK_NAME = 'kubernetes'
def test_fail(self):
        # To avoid the disappearance of some gauges during the second check
mocks = {'_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json"))}
config = {
"instances": [{"host": "foo"}]
}
with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list', side_effect=lambda: json.loads(Fixtures.read_file("pods_list.json", string_escape=False))):
with mock.patch('utils.kubeutil.KubeUtil.extract_kube_labels', side_effect=lambda x: json.loads(Fixtures.read_file("kube_labels.json"))):
# Can't use run_check_twice due to specific metrics
self.run_check(config, mocks=mocks, force_reload=True)
self.assertServiceCheck("kubernetes.kubelet.check", status=AgentCheck.CRITICAL)
def test_metrics(self):
        # To avoid the disappearance of some gauges during the second check
mocks = {
'_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json")),
}
config = {
"instances": [
{
"host": "foo",
"enable_kubelet_checks": False
}
]
}
# parts of the json returned by the kubelet api are escaped; keep them untouched
with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list', side_effect=lambda: json.loads(Fixtures.read_file("pods_list.json", string_escape=False))):
with mock.patch('utils.kubeutil.KubeUtil.extract_kube_labels', side_effect=lambda x: json.loads(Fixtures.read_file("kube_labels.json"))):
self.run_check_twice(config, mocks=mocks, force_reload=True)
expected_tags = [
(['container_name:/kubelet', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_ef0ed5f9', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['container_name:/kube-proxy', 'pod_name:no_pod'], [MEM, CPU, NET]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_POD.2688308a_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_295f14ff', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['container_name:/docker-daemon', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_etcd.2e44beff_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_e3e504ad', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'container_name:k8s_POD.e4cc795_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_49dd977d', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_skydns.1e752dc0_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_7c1345a1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['container_name:/', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['container_name:/system/docker', 'pod_name:no_pod'], [MEM, CPU, DISK, NET]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-dhdzk_default_ba151259-36e0-11e5-84ce-42010af01c62_19879457', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['container_name:/system', 'pod_name:no_pod'], [MEM, CPU, NET, DISK]),
(['kube_replication_controller:kube-ui-v1', 'kube_namespace:kube-system', 'container_name:k8s_POD.3b46e8b9_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_209ed1dc', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'container_name:k8s_kube2sky.1afa6a47_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_624bc34c', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_45d1185b', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_POD.e4cc795_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_5ad59bf3', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'container_name:k8s_haproxy.69b6303b_haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la_default_86527bf8-36cd-11e5-84ce-42010af01c62_a35b9731', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-ui-v1','kube_namespace:kube-system', 'container_name:k8s_kube-ui.c17839c_kube-ui-v1-sv2sq_kube-system_b7e8f250-3619-11e5-84ce-42010af01c62_d2b9aa90', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe','kube_namespace:default', 'container_name:k8s_propjoe.21f63023_propjoe-lkc3l_default_3a9b1759-4055-11e5-84ce-42010af01c62_9fe8b7b0', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-dns-v8','kube_namespace:kube-system', 'container_name:k8s_healthz.4469a25d_kube-dns-v8-smhcb_kube-system_b80ffab3-3619-11e5-84ce-42010af01c62_241c34d1', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion','kube_namespace:kube-system', 'container_name:k8s_fluentd-cloud-logging.7721935b_fluentd-cloud-logging-kubernetes-minion-mu4w_kube-system_d0feac1ad02da9e97c4bf67970ece7a1_2c3c0879', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['container_name:dd-agent', 'pod_name:no_pod'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:l7-lb-controller'], [PODS]),
(['kube_replication_controller:redis-slave'], [PODS]),
(['kube_replication_controller:frontend'], [PODS]),
(['kube_replication_controller:heapster-v11'], [PODS]),
]
for m, _type in METRICS:
for tags, types in expected_tags:
if _type in types:
self.assertMetric(m, count=1, tags=tags)
self.coverage_report()
def test_historate(self):
# To avoid the disappearance of some gauges during the second check
mocks = {'_retrieve_metrics': lambda x: json.loads(Fixtures.read_file("metrics.json"))}
config = {
"instances": [
{
"host": "foo",
"enable_kubelet_checks": False,
"use_histogram": True,
}
]
}
# parts of the json returned by the kubelet api are escaped; keep them untouched
with mock.patch('utils.kubeutil.KubeUtil.retrieve_pods_list', side_effect=lambda: json.loads(Fixtures.read_file("pods_list.json", string_escape=False))):
with mock.patch('utils.kubeutil.KubeUtil.extract_kube_labels', side_effect=lambda x: json.loads(Fixtures.read_file("kube_labels.json"))):
self.run_check_twice(config, mocks=mocks, force_reload=True)
metric_suffix = ["count", "avg", "median", "max", "95percentile"]
expected_tags = [
(['pod_name:no_pod'], [MEM, CPU, NET, DISK, DISK_USAGE, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:fluentd-cloud-logging-kubernetes-minion', 'kube_namespace:kube-system', 'pod_name:kube-system/fluentd-cloud-logging-kubernetes-minion-mu4w'], [MEM, CPU, FS, NET, NET_ERRORS, DISK]),
(['kube_replication_controller:kube-dns-v8', 'kube_namespace:kube-system', 'pod_name:kube-system/kube-dns-v8-smhcb'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-dhdzk'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:kube-ui-v1','kube_namespace:kube-system', 'pod_name:kube-system/kube-ui-v1-sv2sq'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:propjoe', 'kube_namespace:default', 'pod_name:default/propjoe-lkc3l'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:haproxy-6db79c7bbcac01601ac35bcdb18868b3', 'kube_namespace:default', 'pod_name:default/haproxy-6db79c7bbcac01601ac35bcdb18868b3-rr7la'], [MEM, CPU, FS, NET, NET_ERRORS]),
(['kube_replication_controller:l7-lb-controller'], [PODS]),
(['kube_replication_controller:redis-slave'], [PODS]),
(['kube_replication_controller:frontend'], [PODS]),
(['kube_replication_controller:heapster-v11'], [PODS]),
]
for m, _type in METRICS:
for m_suffix in metric_suffix:
for tags, types in expected_tags:
if _type in types:
self.assertMetric("{0}.{1}".format(m, m_suffix), count=1, tags=tags)
self.coverage_report()
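# Sketch (an addition, not part of the original tests): the nested
# mock.patch(...) calls above swap KubeUtil methods for side_effect
# callables so no kubelet API is ever hit. The same idiom in isolation,
# against a hypothetical stand-in class:
class _FakeKubeUtil(object):
    def retrieve_pods_list(self):
        raise RuntimeError("would call the kubelet API")

def _mock_patch_demo():
    # Patch the method on the class; side_effect supplies canned JSON instead.
    with mock.patch.object(_FakeKubeUtil, 'retrieve_pods_list',
                           side_effect=lambda: json.loads('{"items": []}')):
        return _FakeKubeUtil().retrieve_pods_list()  # -> {'items': []}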
| 75.233333
| 375
| 0.694284
| 1,405
| 11,285
| 5.333808
| 0.147331
| 0.046704
| 0.103416
| 0.036696
| 0.848012
| 0.847078
| 0.842941
| 0.831999
| 0.802642
| 0.755404
| 0
| 0.072664
| 0.164643
| 11,285
| 149
| 376
| 75.738255
| 0.722287
| 0.045281
| 0
| 0.308333
| 0
| 0.066667
| 0.542464
| 0.499814
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.025
| false
| 0
| 0.033333
| 0
| 0.075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07bfc2d2478041b0b5fe34c7b615c41201bf65dc
| 7,524
|
py
|
Python
|
webcam/recognition.py
|
newTypeGeek/face-recognition
|
235cf4aaf60ba3504b0e73dbab5f9dc4c7cc3dbd
|
[
"Apache-2.0"
] | 5
|
2020-02-10T04:38:40.000Z
|
2021-09-01T18:50:18.000Z
|
webcam/recognition.py
|
newTypeGeek/face-recognition
|
235cf4aaf60ba3504b0e73dbab5f9dc4c7cc3dbd
|
[
"Apache-2.0"
] | 1
|
2020-06-11T18:26:38.000Z
|
2020-06-11T18:26:38.000Z
|
webcam/recognition.py
|
newTypeGeek/face-recognition
|
235cf4aaf60ba3504b0e73dbab5f9dc4c7cc3dbd
|
[
"Apache-2.0"
] | 3
|
2019-06-24T12:30:12.000Z
|
2020-02-10T04:39:59.000Z
|
#!/usr/bin/env python3
##################
# recognition.py #
##################
# Methods to perform face recognition from 128-d vectors
# These functions are used in recognize_video.py
import numpy as np
import pickle
import time
import sys
def svm(vector, recognizer, le, max_elapsed):
'''
Face recognition by SVM
Arguments:
1. vector: Input 128-d vector
2. recognizer: SVM model
3. le: Encoded label for SVM
4. max_elapsed: Maximum time elapsed for this function
Used during video streaming
Returns:
1. name: Identity of this vector
2. score: Probability of SVM classification
3. max_elapsed: (same as the 4th argument)
'''
start = time.time()
preds = recognizer.predict_proba(vector)[0]
# preds = recognizer.predict(vector)[0]
# print(preds)
j = np.argmax(preds)
# j = preds
name = le.classes_[j]
score = preds[j]
# score = 0
elapsed = time.time() - start
if elapsed > max_elapsed:
max_elapsed = elapsed
return name, score, max_elapsed
def knn(vector, recognizer, le, max_elapsed):
'''
Face recognition by KNN
Arguments:
1. vector: Input 128-d vector
2. recognizer: KNN model
3. le: Encoded label for KNN
4. max_elapsed: Maximum time elapsed for this function
Used during video streaming
Returns:
1. name: Identity of this vector
2. score: Probability of KNN classification
3. max_elapsed: (same as the 4th argument)
'''
start = time.time()
preds = recognizer.predict_proba(vector)[0]
j = np.argmax(preds)
name = le.classes_[j]
score = preds[j]
elapsed = time.time() - start
if elapsed > max_elapsed:
max_elapsed = elapsed
return name, score, max_elapsed
def rf(vector, recognizer, le, max_elapsed):
'''
Face recognition by Random Forest
Arguments:
1. vector: Input 128-d vector
2. recognizer: Random Forest model
3. le: Encoded label for Random Forest
4. max_elapsed: Maximum time elapsed for this function
Used during video streaming
Returns:
1. name: Identity of this vector
2. score: Probability of Random Forest classification
3. max_elapsed: (same as the 4th argument)
'''
start = time.time()
preds = recognizer.predict_proba(vector)[0]
# preds = recognizer.predict(vector)[0]
j = np.argmax(preds)
# j = preds
name = le.classes_[j]
score = preds[j]
# score = 0
elapsed = time.time() - start
if elapsed > max_elapsed:
max_elapsed = elapsed
return name, score, max_elapsed
def pearson(vector, vectors, labels, max_elapsed):
'''
Face recognition by searching for the
maximum Pearson correlation with the database
Arguments:
1. vector: Input 128-d vector
2. vectors: 128-d vectors from database
3. labels: Identities of 128-d vectors from database
4. max_elapsed: Maximum time elapsed for this function
Used during video streaming
Returns:
1. name: Identity of this vector
2. score: Optimal value of Pearson correlation
3. max_elapsed: (same as the 4th argument)
'''
start = time.time()
n = len(labels)
idx = 0
score = -1
# This is 2-4 ms faster than calling np.corrcoef(...) inside the loop,
# because the per-query quantities below are computed once and re-used,
# instead of being recomputed by np.corrcoef(..) on every iteration
vec_num = len(vector[0])  # vector appears to be shaped (1, 128); each vectors[i] likewise
x_mean = np.mean(vector[0])
x_lower = np.sqrt(np.sum(vector[0]*vector[0]) - vec_num*x_mean*x_mean)
for i in range(n):
y_mean = np.mean(vectors[i])
y_lower = np.sqrt(np.sum(vectors[i]*vectors[i]) - vec_num*y_mean*y_mean)
x = ( np.dot(vector[0], vectors[i][0]) - vec_num * x_mean * y_mean ) / (x_lower * y_lower)
# x = np.corrcoef(vector[0], vectors[i])[0][1]
if x > score:
score = x
idx = i
name = labels[idx]
elapsed = time.time() - start
if elapsed > max_elapsed:
max_elapsed = elapsed
return name, score, max_elapsed
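# Sanity-check sketch (an addition, not in the original): the hand-rolled
# formula above is algebraically the same Pearson r that np.corrcoef
# computes; `np` is the module-level numpy import.
def _pearson_sanity_check():
    rng = np.random.RandomState(0)
    a, b = rng.normal(size=128), rng.normal(size=128)
    n = len(a)
    manual = (np.dot(a, b) - n * a.mean() * b.mean()) / (
        np.sqrt(np.sum(a * a) - n * a.mean() ** 2) *
        np.sqrt(np.sum(b * b) - n * b.mean() ** 2))
    assert np.isclose(manual, np.corrcoef(a, b)[0][1])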
def cosine(vector, vectors, labels, max_elapsed):
'''
Face recognition by searching for the
maximum cosine similarity with the database
Arguments:
1. vector: Input 128-d vector
2. vectors: 128-d vectors from database
3. labels: Identities of 128-d vectors from database
4. max_elapsed: Maximum time elapsed for this function
Used during video streaming
Returns:
1. name: Identity of this vector
2. score: Optimal value of cosine similarity
3. max_elapsed: (same as the 4th argument)
'''
start = time.time()
n = len(labels)
idx = 0
score = -1
vector_l2 = np.sqrt(np.sum(vector[0] * vector[0]))
for i in range(n):
vectors_l2 = np.sqrt(np.sum(vectors[i] * vectors[i]))
product_l2 = vector_l2 * vectors_l2
x = np.dot(vector[0], vectors[i][0]) / product_l2
if x > score:
score = x
idx = i
name = labels[idx]
elapsed = time.time() - start
if elapsed > max_elapsed:
max_elapsed = elapsed
return name, score, max_elapsed
def l2_distance(vector, vectors, labels, max_elapsed):
'''
Face recognition by searching for the
minimum L2 distance with the database
Arguments:
1. vector: Input 128-d vector
2. vectors: 128-d vectors from database
3. labels: Identities of 128-d vectors from database
4. max_elapsed: Maximum time elapsed for this function
Used during video streaming
Returns:
1. name: Identity of this vector
2. score: Optimal value of L2 distance
3. max_elapsed: (same as the 4th argument)
'''
start = time.time()
n = len(labels)
idx = 0
score = sys.float_info.max
for i in range(n):
diff = vector[0] - vectors[i]
x = np.sqrt( np.sum(diff * diff) )
if x < score:
score = x
idx = i
name = labels[idx]
elapsed = time.time() - start
if elapsed > max_elapsed:
max_elapsed = elapsed
return name, score, max_elapsed
def l1_distance(vector, vectors, labels, max_elapsed):
'''
Face recognition by searching for the
minimum L1 distance with the database
Arguments:
1. vector: Input 128-d vector
2. vectors: 128-d vectors from database
3. labels: Identities of 128-d vectors from database
4. max_elapsed: Maximum time elapsed for this function
Used during video streaming
Returns:
1. name: Identity of this vector
2. score: Optimal value of L1 distance
3. max_elapsed: (same as the 4th argument)
'''
start = time.time()
n = len(labels)
idx = 0
score = sys.float_info.max
for i in range(n):
x = np.sum( np.abs(vector[0] - vectors[i]) )
if x < score:
score = x
idx = i
name = labels[idx]
elapsed = time.time() - start
if elapsed > max_elapsed:
max_elapsed = elapsed
return name, score, max_elapsed
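# Vectorized sketch (an addition, not in the original): if the database is
# stacked into a single (n, 128) ndarray with np.vstack, the per-entry
# Python loop in cosine() collapses into a few array operations.
def cosine_match(vector, matrix, labels):
    v = vector[0]
    # Row-wise cosine similarity of every database vector against the query.
    sims = matrix.dot(v) / (np.sqrt((matrix * matrix).sum(axis=1)) * np.sqrt(v.dot(v)))
    j = int(np.argmax(sims))
    return labels[j], float(sims[j])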
| 21.808696
| 98
| 0.593966
| 1,003
| 7,524
| 4.380857
| 0.12662
| 0.095585
| 0.054165
| 0.02731
| 0.840237
| 0.825671
| 0.81543
| 0.81543
| 0.744197
| 0.715521
| 0
| 0.029337
| 0.315922
| 7,524
| 344
| 99
| 21.872093
| 0.824364
| 0.475279
| 0
| 0.775701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065421
| false
| 0
| 0.037383
| 0
| 0.168224
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07e25bf120b6d8b1d614cb3bd1ff9a21d37baed9
| 259
|
py
|
Python
|
ponyexpress/models/__init__.py
|
TelekomCloud/pony-express
|
a825b518687719be5dfe95692008c2129db115cd
|
[
"Apache-2.0"
] | null | null | null |
ponyexpress/models/__init__.py
|
TelekomCloud/pony-express
|
a825b518687719be5dfe95692008c2129db115cd
|
[
"Apache-2.0"
] | null | null | null |
ponyexpress/models/__init__.py
|
TelekomCloud/pony-express
|
a825b518687719be5dfe95692008c2129db115cd
|
[
"Apache-2.0"
] | null | null | null |
from ponyexpress.models.repository import Repository
from ponyexpress.models.repo_history import RepoHistory
from ponyexpress.models.package import Package
from ponyexpress.models.package_history import PackageHistory
from ponyexpress.models.node import Node
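# Optional addition (not in the original module): make the surface
# re-exported by `from ponyexpress.models import *` explicit.
__all__ = ['Repository', 'RepoHistory', 'Package', 'PackageHistory', 'Node']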
| 43.166667
| 61
| 0.88417
| 32
| 259
| 7.09375
| 0.34375
| 0.330396
| 0.462555
| 0.246696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07722
| 259
| 5
| 62
| 51.8
| 0.949791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5807231447c50ebfd72b368fc6b5dcb01b692273
| 8,699
|
py
|
Python
|
tools/test_apps/system/panic/app_test.py
|
lovyan03/esp-idf
|
cd5d30b56a13b8f0933e8879be1f97724a88004a
|
[
"Apache-2.0"
] | 8,747
|
2016-08-18T14:58:24.000Z
|
2022-03-31T20:58:55.000Z
|
tools/test_apps/system/panic/app_test.py
|
lovyan03/esp-idf
|
cd5d30b56a13b8f0933e8879be1f97724a88004a
|
[
"Apache-2.0"
] | 8,603
|
2016-08-20T08:55:56.000Z
|
2022-03-31T23:04:01.000Z
|
tools/test_apps/system/panic/app_test.py
|
lovyan03/esp-idf
|
cd5d30b56a13b8f0933e8879be1f97724a88004a
|
[
"Apache-2.0"
] | 6,380
|
2016-08-18T18:17:00.000Z
|
2022-03-31T22:25:57.000Z
|
#!/usr/bin/env python
import sys
import panic_tests as test
from test_panic_util.test_panic_util import panic_test, run_all
# test_task_wdt
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_task_wdt(env, _extra_data):
test.task_wdt_inner(env, 'panic')
@panic_test()
def test_coredump_task_wdt_uart_elf_crc(env, _extra_data):
test.task_wdt_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_task_wdt_uart_bin_crc(env, _extra_data):
test.task_wdt_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_task_wdt_flash_elf_sha(env, _extra_data):
test.task_wdt_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_task_wdt_flash_bin_crc(env, _extra_data):
test.task_wdt_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_task_wdt(env, _extra_data):
test.task_wdt_inner(env, 'gdbstub')
# test_int_wdt
@panic_test()
def test_panic_int_wdt(env, _extra_data):
test.int_wdt_inner(env, 'panic')
@panic_test()
def test_coredump_int_wdt_uart_elf_crc(env, _extra_data):
test.int_wdt_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_int_wdt_uart_bin_crc(env, _extra_data):
test.int_wdt_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_int_wdt_flash_elf_sha(env, _extra_data):
test.int_wdt_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_int_wdt_flash_bin_crc(env, _extra_data):
test.int_wdt_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_int_wdt(env, _extra_data):
test.int_wdt_inner(env, 'gdbstub')
# test_int_wdt_cache_disabled
@panic_test()
def test_panic_int_wdt_cache_disabled(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'panic')
@panic_test()
def test_coredump_int_wdt_cache_disabled_uart_elf_crc(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_int_wdt_cache_disabled_uart_bin_crc(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_int_wdt_cache_disabled_flash_elf_sha(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_int_wdt_cache_disabled_flash_bin_crc(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_int_wdt_cache_disabled(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'gdbstub')
# test_cache_error
@panic_test()
def test_panic_cache_error(env, _extra_data):
test.cache_error_inner(env, 'panic')
@panic_test()
def test_coredump_cache_error_uart_elf_crc(env, _extra_data):
test.cache_error_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_cache_error_uart_bin_crc(env, _extra_data):
test.cache_error_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_cache_error_flash_elf_sha(env, _extra_data):
test.cache_error_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_cache_error_flash_bin_crc(env, _extra_data):
test.cache_error_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_cache_error(env, _extra_data):
test.cache_error_inner(env, 'gdbstub')
# test_stack_overflow
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_stack_overflow(env, _extra_data):
test.stack_overflow_inner(env, 'panic')
@panic_test()
def test_coredump_stack_overflow_uart_elf_crc(env, _extra_data):
test.stack_overflow_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_stack_overflow_uart_bin_crc(env, _extra_data):
test.stack_overflow_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_stack_overflow_flash_elf_sha(env, _extra_data):
test.stack_overflow_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_stack_overflow_flash_bin_crc(env, _extra_data):
test.stack_overflow_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_stack_overflow(env, _extra_data):
test.stack_overflow_inner(env, 'gdbstub')
# test_instr_fetch_prohibited
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_instr_fetch_prohibited(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'panic')
@panic_test()
def test_coredump_instr_fetch_prohibited_uart_elf_crc(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_instr_fetch_prohibited_uart_bin_crc(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_instr_fetch_prohibited_flash_elf_sha(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_instr_fetch_prohibited_flash_bin_crc(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_instr_fetch_prohibited(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'gdbstub')
# test_illegal_instruction
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_illegal_instruction(env, _extra_data):
test.illegal_instruction_inner(env, 'panic')
@panic_test()
def test_coredump_illegal_instruction_uart_elf_crc(env, _extra_data):
test.illegal_instruction_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_illegal_instruction_uart_bin_crc(env, _extra_data):
test.illegal_instruction_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_illegal_instruction_flash_elf_sha(env, _extra_data):
test.illegal_instruction_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_illegal_instruction_flash_bin_crc(env, _extra_data):
test.illegal_instruction_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_illegal_instruction(env, _extra_data):
test.illegal_instruction_inner(env, 'gdbstub')
# test_storeprohibited
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_storeprohibited(env, _extra_data):
test.storeprohibited_inner(env, 'panic')
@panic_test()
def test_coredump_storeprohibited_uart_elf_crc(env, _extra_data):
test.storeprohibited_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_storeprohibited_uart_bin_crc(env, _extra_data):
test.storeprohibited_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_storeprohibited_flash_elf_sha(env, _extra_data):
test.storeprohibited_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_storeprohibited_flash_bin_crc(env, _extra_data):
test.storeprohibited_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_storeprohibited(env, _extra_data):
test.storeprohibited_inner(env, 'gdbstub')
# test_abort
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_abort(env, _extra_data):
test.abort_inner(env, 'panic')
@panic_test(target=['ESP32'])
def test_panic_abort_cache_disabled(env, _extra_data):
test.abort_cached_disabled_inner(env, 'panic')
@panic_test()
def test_coredump_abort_uart_elf_crc(env, _extra_data):
test.abort_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_abort_uart_bin_crc(env, _extra_data):
test.abort_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_abort_flash_elf_sha(env, _extra_data):
test.abort_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_abort_flash_bin_crc(env, _extra_data):
test.abort_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_abort(env, _extra_data):
test.abort_inner(env, 'gdbstub')
# test_assert
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_assert(env, _extra_data):
test.assert_inner(env, 'panic')
@panic_test(target=['ESP32'])
def test_panic_assert_cache_disabled(env, _extra_data):
test.assert_cached_disabled_inner(env, 'panic')
# test_ub
@panic_test()
def test_panic_ub(env, _extra_data):
test.ub_inner(env, 'panic')
@panic_test()
def test_coredump_ub_uart_elf_crc(env, _extra_data):
test.ub_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_ub_uart_bin_crc(env, _extra_data):
test.ub_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_ub_flash_elf_sha(env, _extra_data):
test.ub_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_ub_flash_bin_crc(env, _extra_data):
test.ub_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_ub(env, _extra_data):
test.ub_inner(env, 'gdbstub')
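# Sketch (an addition, not in the original): the coredump wrappers above all
# follow one naming pattern, so they could in principle be generated. Whether
# run_all() would still discover generated functions depends on how
# panic_test registers them, so treat this as illustrative only.
def _make_coredump_test(inner, variant):
    @panic_test()
    def wrapper(env, _extra_data):
        inner(env, 'coredump_' + variant)
    return wrapper
# e.g. _make_coredump_test(test.ub_inner, 'uart_elf_crc') builds the same
# callable as test_coredump_ub_uart_elf_crc above.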
if __name__ == '__main__':
run_all(__file__, sys.argv[1:])
| 25.141618
| 73
| 0.800437
| 1,339
| 8,699
| 4.612397
| 0.038835
| 0.094722
| 0.122409
| 0.163212
| 0.946567
| 0.926975
| 0.917584
| 0.872085
| 0.723122
| 0.674223
| 0
| 0.005077
| 0.094379
| 8,699
| 345
| 74
| 25.214493
| 0.778878
| 0.024945
| 0
| 0.324742
| 0
| 0
| 0.129516
| 0.101535
| 0
| 0
| 0
| 0
| 0.020619
| 1
| 0.324742
| false
| 0
| 0.015464
| 0
| 0.340206
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6af2ddb647a5335ec8ffd49dbac7d773181d7135
| 570
|
py
|
Python
|
train_medseg_timm-regnetx_002_grid_distortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
train_medseg_timm-regnetx_002_grid_distortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
train_medseg_timm-regnetx_002_grid_distortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import os
ls=["python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold0_grid_distortion.yml",
"python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold1_grid_distortion.yml",
"python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold2_grid_distortion.yml",
"python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold3_grid_distortion.yml",
"python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold4_grid_distortion.yml",
]
for command in commands:
    os.system(command)
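# Sketch (an addition, equivalent to the loop above): the five commands
# differ only in the fold number, so a format string plus subprocess.run
# avoids hand-copied shell strings.
import subprocess

def run_all_folds():
    cfg = 'configs/train_medseg_unetplusplus_timm-regnetx_002_fold{}_grid_distortion.yml'
    for fold in range(5):
        # check=True raises if a training run exits non-zero.
        subprocess.run(['python', 'main.py', '--configs', cfg.format(fold)], check=True)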
| 51.818182
| 108
| 0.854386
| 85
| 570
| 5.317647
| 0.294118
| 0.110619
| 0.132743
| 0.210177
| 0.847345
| 0.847345
| 0.847345
| 0.847345
| 0.847345
| 0.847345
| 0
| 0.037106
| 0.054386
| 570
| 11
| 109
| 51.818182
| 0.801484
| 0
| 0
| 0
| 0
| 0
| 0.884413
| 0.665499
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6afc8265a1b3e6021d3984d49f559c16ccaf15d8
| 113,310
|
py
|
Python
|
container/google/cloud/container_v1/gapic/cluster_manager_client.py
|
di/google-cloud-python
|
a0bd8d0565e2a682760a113c59ce12b872bce9ab
|
[
"Apache-2.0"
] | 1
|
2019-05-23T11:25:32.000Z
|
2019-05-23T11:25:32.000Z
|
container/google/cloud/container_v1/gapic/cluster_manager_client.py
|
di/google-cloud-python
|
a0bd8d0565e2a682760a113c59ce12b872bce9ab
|
[
"Apache-2.0"
] | null | null | null |
container/google/cloud/container_v1/gapic/cluster_manager_client.py
|
di/google-cloud-python
|
a0bd8d0565e2a682760a113c59ce12b872bce9ab
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.container.v1 ClusterManager API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.container_v1.gapic import cluster_manager_client_config
from google.cloud.container_v1.gapic import enums
from google.cloud.container_v1.gapic.transports import cluster_manager_grpc_transport
from google.cloud.container_v1.proto import cluster_service_pb2
from google.cloud.container_v1.proto import cluster_service_pb2_grpc
from google.protobuf import empty_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-container', ).version
class ClusterManagerClient(object):
"""Google Container Engine Cluster Manager v1"""
SERVICE_ADDRESS = 'container.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.container.v1.ClusterManager'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ClusterManagerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
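# Hypothetical usage of the factory above (the key path is a placeholder):
#   client = ClusterManagerClient.from_service_account_file('/path/to/keyfile.json')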
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=cluster_manager_client_config.config,
client_info=None):
"""Constructor.
Args:
transport (Union[~.ClusterManagerGrpcTransport,
Callable[[~.Credentials, type], ~.ClusterManagerGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning)
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.', PendingDeprecationWarning)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=cluster_manager_grpc_transport.
ClusterManagerGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = cluster_manager_grpc_transport.ClusterManagerGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION, )
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def list_clusters(self,
project_id,
zone,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists all clusters owned by a project in either the specified zone or all
zones.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> response = client.list_clusters(project_id, zone)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides, or "-" for all zones.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.ListClustersResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_clusters' not in self._inner_api_calls:
self._inner_api_calls[
'list_clusters'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_clusters,
default_retry=self._method_configs['ListClusters'].retry,
default_timeout=self._method_configs['ListClusters'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.ListClustersRequest(
project_id=project_id,
zone=zone,
)
return self._inner_api_calls['list_clusters'](
request, retry=retry, timeout=timeout, metadata=metadata)
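# Note (an addition): the lazy wrap-and-cache idiom above is repeated by
# every RPC method below; schematically, with names as used in this class:
#   if rpc_name not in self._inner_api_calls:
#       self._inner_api_calls[rpc_name] = wrap_method(transport_method, ...)
#   return self._inner_api_calls[rpc_name](request, ...)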
def get_cluster(self,
project_id,
zone,
cluster_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets the details of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.get_cluster(project_id, zone, cluster_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to retrieve.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Cluster` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'get_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_cluster,
default_retry=self._method_configs['GetCluster'].retry,
default_timeout=self._method_configs['GetCluster'].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.GetClusterRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
)
return self._inner_api_calls['get_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
def create_cluster(self,
project_id,
zone,
cluster,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a cluster, consisting of the specified number and type of Google
Compute Engine instances.
By default, the cluster is created in the project's `default
network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
One firewall is added for the cluster. After cluster creation, the
cluster creates routes for each node to allow the containers on that
node to communicate with all other instances in the cluster.
Finally, an entry is added to the project's global metadata indicating
which CIDR range is being used by the cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> response = client.create_cluster(project_id, zone, cluster)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster (Union[dict, ~google.cloud.container_v1.types.Cluster]): A `cluster
resource <https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters>`__
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.Cluster`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'create_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_cluster,
default_retry=self._method_configs['CreateCluster'].retry,
default_timeout=self._method_configs['CreateCluster'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.CreateClusterRequest(
project_id=project_id,
zone=zone,
cluster=cluster,
)
return self._inner_api_calls['create_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
def update_cluster(self,
project_id,
zone,
cluster_id,
update,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates the settings of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `update`:
>>> update = {}
>>>
>>> response = client.update_cluster(project_id, zone, cluster_id, update)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
update (Union[dict, ~google.cloud.container_v1.types.ClusterUpdate]): A description of the update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.ClusterUpdate`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'update_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_cluster,
default_retry=self._method_configs['UpdateCluster'].retry,
default_timeout=self._method_configs['UpdateCluster'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.UpdateClusterRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
update=update,
)
return self._inner_api_calls['update_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
def update_node_pool(self,
project_id,
zone,
cluster_id,
node_pool_id,
node_version,
image_type,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates the version and/or image type of a specific node pool.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> # TODO: Initialize `node_version`:
>>> node_version = ''
>>>
>>> # TODO: Initialize `image_type`:
>>> image_type = ''
>>>
>>> response = client.update_node_pool(project_id, zone, cluster_id, node_pool_id, node_version, image_type)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
node_pool_id (str): The name of the node pool to upgrade.
node_version (str): The Kubernetes version to change the nodes to (typically an upgrade).
Use ``-`` to upgrade to the latest version supported by the server.
image_type (str): The desired image type for the node pool.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_node_pool' not in self._inner_api_calls:
self._inner_api_calls[
'update_node_pool'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_node_pool,
default_retry=self._method_configs['UpdateNodePool'].retry,
default_timeout=self._method_configs['UpdateNodePool'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.UpdateNodePoolRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
node_version=node_version,
image_type=image_type,
)
return self._inner_api_calls['update_node_pool'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_node_pool_autoscaling(
self,
project_id,
zone,
cluster_id,
node_pool_id,
autoscaling,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the autoscaling settings of a specific node pool.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> # TODO: Initialize `autoscaling`:
>>> autoscaling = {}
>>>
>>> response = client.set_node_pool_autoscaling(project_id, zone, cluster_id, node_pool_id, autoscaling)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
node_pool_id (str): The name of the node pool to upgrade.
autoscaling (Union[dict, ~google.cloud.container_v1.types.NodePoolAutoscaling]): Autoscaling configuration for the node pool.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.NodePoolAutoscaling`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_node_pool_autoscaling' not in self._inner_api_calls:
self._inner_api_calls[
'set_node_pool_autoscaling'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_node_pool_autoscaling,
default_retry=self.
_method_configs['SetNodePoolAutoscaling'].retry,
default_timeout=self.
_method_configs['SetNodePoolAutoscaling'].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetNodePoolAutoscalingRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
autoscaling=autoscaling,
)
return self._inner_api_calls['set_node_pool_autoscaling'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_logging_service(self,
project_id,
zone,
cluster_id,
logging_service,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the logging service of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `logging_service`:
>>> logging_service = ''
>>>
>>> response = client.set_logging_service(project_id, zone, cluster_id, logging_service)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
logging_service (str): The logging service the cluster should use to write metrics. Currently
available options:
- "logging.googleapis.com" - the Google Cloud Logging service
- "none" - no metrics will be exported from the cluster
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_logging_service' not in self._inner_api_calls:
self._inner_api_calls[
'set_logging_service'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_logging_service,
default_retry=self._method_configs['SetLoggingService'].
retry,
default_timeout=self._method_configs['SetLoggingService'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetLoggingServiceRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
logging_service=logging_service,
)
return self._inner_api_calls['set_logging_service'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_monitoring_service(self,
project_id,
zone,
cluster_id,
monitoring_service,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the monitoring service of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `monitoring_service`:
>>> monitoring_service = ''
>>>
>>> response = client.set_monitoring_service(project_id, zone, cluster_id, monitoring_service)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
monitoring_service (str): The monitoring service the cluster should use to write metrics.
Currently available options:
- "monitoring.googleapis.com" - the Google Cloud Monitoring service
- "none" - no metrics will be exported from the cluster
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_monitoring_service' not in self._inner_api_calls:
self._inner_api_calls[
'set_monitoring_service'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_monitoring_service,
default_retry=self._method_configs['SetMonitoringService'].
retry,
default_timeout=self.
_method_configs['SetMonitoringService'].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetMonitoringServiceRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
monitoring_service=monitoring_service,
)
return self._inner_api_calls['set_monitoring_service'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_addons_config(self,
project_id,
zone,
cluster_id,
addons_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the addons of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `addons_config`:
>>> addons_config = {}
>>>
>>> response = client.set_addons_config(project_id, zone, cluster_id, addons_config)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
addons_config (Union[dict, ~google.cloud.container_v1.types.AddonsConfig]): The desired configurations for the various addons available to run in the
cluster.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.AddonsConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_addons_config' not in self._inner_api_calls:
self._inner_api_calls[
'set_addons_config'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_addons_config,
default_retry=self._method_configs['SetAddonsConfig'].
retry,
default_timeout=self._method_configs['SetAddonsConfig'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetAddonsConfigRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
addons_config=addons_config,
)
return self._inner_api_calls['set_addons_config'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_locations(self,
project_id,
zone,
cluster_id,
locations,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the locations of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `locations`:
>>> locations = []
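>>>
>>> # For example (hypothetical zone names; per the Args note, the list
>>> # must always include the cluster's primary zone):
>>> locations = ['us-central1-a', 'us-central1-b']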
>>>
>>> response = client.set_locations(project_id, zone, cluster_id, locations)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
locations (list[str]): The desired list of Google Compute Engine
`locations <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster's nodes should be located. Changing the locations a
cluster is in will result in nodes being either created or removed from
the cluster, depending on whether locations are being added or removed.
This list must always include the cluster's primary zone.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_locations' not in self._inner_api_calls:
self._inner_api_calls[
'set_locations'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_locations,
default_retry=self._method_configs['SetLocations'].retry,
default_timeout=self._method_configs['SetLocations'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetLocationsRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
locations=locations,
)
return self._inner_api_calls['set_locations'](
request, retry=retry, timeout=timeout, metadata=metadata)
def update_master(self,
project_id,
zone,
cluster_id,
master_version,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates the master of a specific cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `master_version`:
>>> master_version = ''
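>>>
>>> # Per the Args note, pass "-" to let the server select the latest
>>> # supported version:
>>> master_version = '-'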
>>>
>>> response = client.update_master(project_id, zone, cluster_id, master_version)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
master_version (str): The Kubernetes version to change the master to. The only valid value is the
latest supported version. Use "-" to have the server automatically select
the latest version.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_master' not in self._inner_api_calls:
self._inner_api_calls[
'update_master'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_master,
default_retry=self._method_configs['UpdateMaster'].retry,
default_timeout=self._method_configs['UpdateMaster'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.UpdateMasterRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
master_version=master_version,
)
return self._inner_api_calls['update_master'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_master_auth(self,
project_id,
zone,
cluster_id,
action,
update,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Used to set master auth materials. Currently supports changing the
admin password of a specific cluster, either by generating a new
password or by explicitly setting it.
Example:
>>> from google.cloud import container_v1
>>> from google.cloud.container_v1 import enums
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `action`:
>>> action = enums.SetMasterAuthRequest.Action.UNKNOWN
>>>
>>> # TODO: Initialize `update`:
>>> update = {}
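>>>
>>> # Illustrative values for an explicit password change (MasterAuth
>>> # field name assumed):
>>> action = enums.SetMasterAuthRequest.Action.SET_PASSWORD
>>> update = {'password': 'correct-horse-battery-staple'}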
>>>
>>> response = client.set_master_auth(project_id, zone, cluster_id, action, update)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to upgrade.
action (~google.cloud.container_v1.types.Action): The exact form of action to be taken on the master auth.
update (Union[dict, ~google.cloud.container_v1.types.MasterAuth]): A description of the update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.MasterAuth`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_master_auth' not in self._inner_api_calls:
self._inner_api_calls[
'set_master_auth'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_master_auth,
default_retry=self._method_configs['SetMasterAuth'].retry,
default_timeout=self._method_configs['SetMasterAuth'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetMasterAuthRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
action=action,
update=update,
)
return self._inner_api_calls['set_master_auth'](
request, retry=retry, timeout=timeout, metadata=metadata)
def delete_cluster(self,
project_id,
zone,
cluster_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes the cluster, including the Kubernetes endpoint and all worker
nodes.
Firewalls and routes that were configured during cluster creation
are also deleted.
Other Google Compute Engine resources that might be in use by the cluster
(e.g. load balancer resources) will not be deleted if they weren't present
at the initial create time.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.delete_cluster(project_id, zone, cluster_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to delete.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'delete_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_cluster,
default_retry=self._method_configs['DeleteCluster'].retry,
default_timeout=self._method_configs['DeleteCluster'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.DeleteClusterRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
)
return self._inner_api_calls['delete_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_operations(self,
project_id,
zone,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists all operations in a project in a specific zone or all zones.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
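>>>
>>> # Per the Args note, pass '-' to list operations across all zones:
>>> # zone = '-'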
>>>
>>> response = client.list_operations(project_id, zone)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ to
return operations for, or ``-`` for all zones.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.ListOperationsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_operations' not in self._inner_api_calls:
self._inner_api_calls[
'list_operations'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_operations,
default_retry=self._method_configs['ListOperations'].retry,
default_timeout=self._method_configs['ListOperations'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.ListOperationsRequest(
project_id=project_id,
zone=zone,
)
return self._inner_api_calls['list_operations'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_operation(self,
project_id,
zone,
operation_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets the specified operation.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `operation_id`:
>>> operation_id = ''
>>>
>>> response = client.get_operation(project_id, zone, operation_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
operation_id (str): The server-assigned ``name`` of the operation.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_operation' not in self._inner_api_calls:
self._inner_api_calls[
'get_operation'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_operation,
default_retry=self._method_configs['GetOperation'].retry,
default_timeout=self._method_configs['GetOperation'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.GetOperationRequest(
project_id=project_id,
zone=zone,
operation_id=operation_id,
)
return self._inner_api_calls['get_operation'](
request, retry=retry, timeout=timeout, metadata=metadata)
def cancel_operation(self,
project_id,
zone,
operation_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Cancels the specified operation.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `operation_id`:
>>> operation_id = ''
>>>
>>> client.cancel_operation(project_id, zone, operation_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the operation resides.
operation_id (str): The server-assigned ``name`` of the operation.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'cancel_operation' not in self._inner_api_calls:
self._inner_api_calls[
'cancel_operation'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.cancel_operation,
default_retry=self._method_configs['CancelOperation'].
retry,
default_timeout=self._method_configs['CancelOperation'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.CancelOperationRequest(
project_id=project_id,
zone=zone,
operation_id=operation_id,
)
self._inner_api_calls['cancel_operation'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_server_config(self,
project_id,
zone,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns configuration info about the Container Engine service.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> response = client.get_server_config(project_id, zone)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ to
return operations for.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.ServerConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_server_config' not in self._inner_api_calls:
self._inner_api_calls[
'get_server_config'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_server_config,
default_retry=self._method_configs['GetServerConfig'].
retry,
default_timeout=self._method_configs['GetServerConfig'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.GetServerConfigRequest(
project_id=project_id,
zone=zone,
)
return self._inner_api_calls['get_server_config'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_node_pools(self,
project_id,
zone,
cluster_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists the node pools for a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.list_node_pools(project_id, zone, cluster_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.ListNodePoolsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_node_pools' not in self._inner_api_calls:
self._inner_api_calls[
'list_node_pools'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_node_pools,
default_retry=self._method_configs['ListNodePools'].retry,
default_timeout=self._method_configs['ListNodePools'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.ListNodePoolsRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
)
return self._inner_api_calls['list_node_pools'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_node_pool(self,
project_id,
zone,
cluster_id,
node_pool_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Retrieves the node pool requested.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> response = client.get_node_pool(project_id, zone, cluster_id, node_pool_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster.
node_pool_id (str): The name of the node pool.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.NodePool` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_node_pool' not in self._inner_api_calls:
self._inner_api_calls[
'get_node_pool'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_node_pool,
default_retry=self._method_configs['GetNodePool'].retry,
default_timeout=self._method_configs['GetNodePool'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.GetNodePoolRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
)
return self._inner_api_calls['get_node_pool'](
request, retry=retry, timeout=timeout, metadata=metadata)
def create_node_pool(self,
project_id,
zone,
cluster_id,
node_pool,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a node pool for a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool`:
>>> node_pool = {}
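>>>
>>> # Illustrative dict form (field names assumed from the NodePool
>>> # message):
>>> node_pool = {'name': 'pool-1', 'initial_node_count': 3}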
>>>
>>> response = client.create_node_pool(project_id, zone, cluster_id, node_pool)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster.
node_pool (Union[dict, ~google.cloud.container_v1.types.NodePool]): The node pool to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.NodePool`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_node_pool' not in self._inner_api_calls:
self._inner_api_calls[
'create_node_pool'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_node_pool,
default_retry=self._method_configs['CreateNodePool'].retry,
default_timeout=self._method_configs['CreateNodePool'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.CreateNodePoolRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool=node_pool,
)
return self._inner_api_calls['create_node_pool'](
request, retry=retry, timeout=timeout, metadata=metadata)
def delete_node_pool(self,
project_id,
zone,
cluster_id,
node_pool_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes a node pool from a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> response = client.delete_node_pool(project_id, zone, cluster_id, node_pool_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster.
node_pool_id (str): The name of the node pool to delete.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_node_pool' not in self._inner_api_calls:
self._inner_api_calls[
'delete_node_pool'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_node_pool,
default_retry=self._method_configs['DeleteNodePool'].retry,
default_timeout=self._method_configs['DeleteNodePool'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.DeleteNodePoolRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
)
return self._inner_api_calls['delete_node_pool'](
request, retry=retry, timeout=timeout, metadata=metadata)
def rollback_node_pool_upgrade(
self,
project_id,
zone,
cluster_id,
node_pool_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Rolls back a previously Aborted or Failed NodePool upgrade.
This is a no-op if the last upgrade completed successfully.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> response = client.rollback_node_pool_upgrade(project_id, zone, cluster_id, node_pool_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to rollback.
node_pool_id (str): The name of the node pool to rollback.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'rollback_node_pool_upgrade' not in self._inner_api_calls:
self._inner_api_calls[
'rollback_node_pool_upgrade'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.rollback_node_pool_upgrade,
default_retry=self.
_method_configs['RollbackNodePoolUpgrade'].retry,
default_timeout=self.
_method_configs['RollbackNodePoolUpgrade'].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.RollbackNodePoolUpgradeRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
)
return self._inner_api_calls['rollback_node_pool_upgrade'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_node_pool_management(
self,
project_id,
zone,
cluster_id,
node_pool_id,
management,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the NodeManagement options for a node pool.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> # TODO: Initialize `management`:
>>> management = {}
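>>>
>>> # Illustrative values (field names assumed from NodeManagement):
>>> management = {'auto_repair': True, 'auto_upgrade': True}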
>>>
>>> response = client.set_node_pool_management(project_id, zone, cluster_id, node_pool_id, management)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to update.
node_pool_id (str): The name of the node pool to update.
management (Union[dict, ~google.cloud.container_v1.types.NodeManagement]): NodeManagement configuration for the node pool.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.NodeManagement`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_node_pool_management' not in self._inner_api_calls:
self._inner_api_calls[
'set_node_pool_management'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_node_pool_management,
default_retry=self.
_method_configs['SetNodePoolManagement'].retry,
default_timeout=self.
_method_configs['SetNodePoolManagement'].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetNodePoolManagementRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
management=management,
)
return self._inner_api_calls['set_node_pool_management'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_labels(self,
project_id,
zone,
cluster_id,
resource_labels,
label_fingerprint,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets labels on a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `resource_labels`:
>>> resource_labels = {}
>>>
>>> # TODO: Initialize `label_fingerprint`:
>>> label_fingerprint = ''
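>>>
>>> # A hedged read-modify-write sketch (assumes get_cluster and the
>>> # Cluster.label_fingerprint / resource_labels fields): fetch the
>>> # current fingerprint before updating labels.
>>> cluster = client.get_cluster(project_id, zone, cluster_id)
>>> label_fingerprint = cluster.label_fingerprint
>>> resource_labels = dict(cluster.resource_labels)
>>> resource_labels['env'] = 'prod'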
>>>
>>> response = client.set_labels(project_id, zone, cluster_id, resource_labels, label_fingerprint)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster.
resource_labels (dict[str -> str]): The labels to set for that cluster.
label_fingerprint (str): The fingerprint of the previous set of labels for this resource,
used to detect conflicts. The fingerprint is initially generated by
Container Engine and changes after every request to modify or update
labels. You must always provide an up-to-date fingerprint hash when
updating or changing labels. Make a ``get()`` request to the
resource to get the latest fingerprint.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_labels' not in self._inner_api_calls:
self._inner_api_calls[
'set_labels'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_labels,
default_retry=self._method_configs['SetLabels'].retry,
default_timeout=self._method_configs['SetLabels'].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetLabelsRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
resource_labels=resource_labels,
label_fingerprint=label_fingerprint,
)
return self._inner_api_calls['set_labels'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_legacy_abac(self,
project_id,
zone,
cluster_id,
enabled,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Enables or disables the ABAC authorization mechanism on a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `enabled`:
>>> enabled = False
>>>
>>> response = client.set_legacy_abac(project_id, zone, cluster_id, enabled)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to update.
enabled (bool): Whether ABAC authorization will be enabled in the cluster.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_legacy_abac' not in self._inner_api_calls:
self._inner_api_calls[
'set_legacy_abac'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_legacy_abac,
default_retry=self._method_configs['SetLegacyAbac'].retry,
default_timeout=self._method_configs['SetLegacyAbac'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetLegacyAbacRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
enabled=enabled,
)
return self._inner_api_calls['set_legacy_abac'](
request, retry=retry, timeout=timeout, metadata=metadata)
def start_i_p_rotation(self,
project_id,
zone,
cluster_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Starts master IP rotation.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.start_i_p_rotation(project_id, zone, cluster_id)
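>>>
>>> # Usage note (not from the original docs): once all API clients are
>>> # updated to the new IP, finish the rotation with
>>> # client.complete_i_p_rotation(project_id, zone, cluster_id).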
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'start_i_p_rotation' not in self._inner_api_calls:
self._inner_api_calls[
'start_i_p_rotation'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.start_i_p_rotation,
default_retry=self._method_configs['StartIPRotation'].
retry,
default_timeout=self._method_configs['StartIPRotation'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.StartIPRotationRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
)
return self._inner_api_calls['start_i_p_rotation'](
request, retry=retry, timeout=timeout, metadata=metadata)
def complete_i_p_rotation(self,
project_id,
zone,
cluster_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Completes master IP rotation.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> response = client.complete_i_p_rotation(project_id, zone, cluster_id)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'complete_i_p_rotation' not in self._inner_api_calls:
self._inner_api_calls[
'complete_i_p_rotation'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.complete_i_p_rotation,
default_retry=self._method_configs['CompleteIPRotation'].
retry,
default_timeout=self._method_configs['CompleteIPRotation'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.CompleteIPRotationRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
)
return self._inner_api_calls['complete_i_p_rotation'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_node_pool_size(self,
project_id,
zone,
cluster_id,
node_pool_id,
node_count,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the size of a specific node pool.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `node_pool_id`:
>>> node_pool_id = ''
>>>
>>> # TODO: Initialize `node_count`:
>>> node_count = 0
>>>
>>> response = client.set_node_pool_size(project_id, zone, cluster_id, node_pool_id, node_count)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to update.
node_pool_id (str): The name of the node pool to update.
node_count (int): The desired node count for the pool.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_node_pool_size' not in self._inner_api_calls:
self._inner_api_calls[
'set_node_pool_size'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_node_pool_size,
default_retry=self._method_configs['SetNodePoolSize'].
retry,
default_timeout=self._method_configs['SetNodePoolSize'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetNodePoolSizeRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
node_pool_id=node_pool_id,
node_count=node_count,
)
return self._inner_api_calls['set_node_pool_size'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_network_policy(self,
project_id,
zone,
cluster_id,
network_policy,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Enables/Disables Network Policy for a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `network_policy`:
>>> network_policy = {}
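>>>
>>> # Illustrative dict form (field names and CALICO provider value
>>> # assumed from the NetworkPolicy message):
>>> # network_policy = {'enabled': True,
>>> #                   'provider': enums.NetworkPolicy.Provider.CALICO}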
>>>
>>> response = client.set_network_policy(project_id, zone, cluster_id, network_policy)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster.
network_policy (Union[dict, ~google.cloud.container_v1.types.NetworkPolicy]): Configuration options for the NetworkPolicy feature.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.NetworkPolicy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_network_policy' not in self._inner_api_calls:
self._inner_api_calls[
'set_network_policy'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_network_policy,
default_retry=self._method_configs['SetNetworkPolicy'].
retry,
default_timeout=self._method_configs['SetNetworkPolicy'].
timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetNetworkPolicyRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
network_policy=network_policy,
)
return self._inner_api_calls['set_network_policy'](
request, retry=retry, timeout=timeout, metadata=metadata)
def set_maintenance_policy(self,
project_id,
zone,
cluster_id,
maintenance_policy,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Sets the maintenance policy for a cluster.
Example:
>>> from google.cloud import container_v1
>>>
>>> client = container_v1.ClusterManagerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `zone`:
>>> zone = ''
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `maintenance_policy`:
>>> maintenance_policy = {}
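>>>
>>> # Illustrative dict form (nesting assumed from MaintenancePolicy):
>>> # a daily maintenance window starting at 03:00 UTC.
>>> # maintenance_policy = {
>>> #     'window': {'daily_maintenance_window': {'start_time': '03:00'}}
>>> # }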
>>>
>>> response = client.set_maintenance_policy(project_id, zone, cluster_id, maintenance_policy)
Args:
project_id (str): The Google Developers Console `project ID or project
number <https://support.google.com/cloud/answer/6158840>`__.
zone (str): The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__ in
which the cluster resides.
cluster_id (str): The name of the cluster to update.
maintenance_policy (Union[dict, ~google.cloud.container_v1.types.MaintenancePolicy]): The maintenance policy to be set for the cluster. An empty field
clears the existing maintenance policy.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.container_v1.types.MaintenancePolicy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.container_v1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'set_maintenance_policy' not in self._inner_api_calls:
self._inner_api_calls[
'set_maintenance_policy'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_maintenance_policy,
default_retry=self._method_configs['SetMaintenancePolicy'].
retry,
default_timeout=self.
_method_configs['SetMaintenancePolicy'].timeout,
client_info=self._client_info,
)
request = cluster_service_pb2.SetMaintenancePolicyRequest(
project_id=project_id,
zone=zone,
cluster_id=cluster_id,
maintenance_policy=maintenance_policy,
)
return self._inner_api_calls['set_maintenance_policy'](
request, retry=retry, timeout=timeout, metadata=metadata)
--- next record ---
hexsha: ed17f452a3de05f25a4e6ed6cf668d1b6bcea5cb | size: 649 | ext: py | lang: Python
path: lng/VeroptBRV320AOC/pythonpath/lightproof_opts_pt_BR.py
repo: alexandre-archive/editor @ dfb2223b0d390b5118ccf5bb8a523c1a61974615 | licenses: ["MIT"]
lopts = {}
lopts_default = {}
lopts['pt_BR'] = [u'grammar', u'cap', u'dup', u'pair', u'spaces', u'mdash', u'quotation', u'times', u'spaces2', u'ndash', u'apostrophe', u'ellipsis', u'spaces3', u'minus', u'metric', u'numsep', u'nonmetric', u'paronimo', u'composto', u'mmalmau', u'aha', u'meiameio', u'verbo', u'pronominal', u'pronome', u'porque']
lopts_default['pt_BR'] = [u'grammar', u'cap', u'dup', u'pair', u'spaces', u'mdash', u'quotation', u'spaces2', u'ndash', u'apostrophe', u'ellipsis', u'spaces3', u'metric', u'numsep', u'nonmetric', u'paronimo', u'composto', u'mmalmau', u'aha', u'meiameio', u'verbo', u'pronominal', u'pronome', u'porque']
--- next record ---
hexsha: ed5a3d74d808ff201669d5f351d74751e6647cd7 | size: 131 | ext: py | lang: Python
path: grobber/index_scraper/__init__.py
repo: MyAnimeStream/grobber @ ced4cd2632f70dacd61d3355cb184d3e19b50996 | licenses: ["MIT"]
from .common import *
from .index_scrapers import *
from .medium import *
from .medium_access import *
from .medium_group import *
| 21.833333
| 29
| 0.770992
| 18
| 131
| 5.444444
| 0.444444
| 0.408163
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152672
| 131
| 5
| 30
| 26.2
| 0.882883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ed686ecdda838bf6a7901337445688737d54bc39
| 153
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/crm/tests/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/crm/tests/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/crm/tests/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import test_crm_lead
import test_new_lead_notification
import test_lead2opportunity
import test_crm_activity
import test_crm_ui
| 19.125
| 33
| 0.836601
| 23
| 153
| 5.130435
| 0.521739
| 0.423729
| 0.330508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014599
| 0.104575
| 153
| 7
| 34
| 21.857143
| 0.846715
| 0.137255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ed83c0511fa1be33a7e30ce1fa57749105ae1936
| 6,743
|
py
|
Python
|
projects/migrations/0001_initial.py
|
zachsnyder1/zachsite
|
2a6d750d8aef7786d1f3c647be62aea7cbdd29c5
|
[
"MIT"
] | null | null | null |
projects/migrations/0001_initial.py
|
zachsnyder1/zachsite
|
2a6d750d8aef7786d1f3c647be62aea7cbdd29c5
|
[
"MIT"
] | null | null | null |
projects/migrations/0001_initial.py
|
zachsnyder1/zachsite
|
2a6d750d8aef7786d1f3c647be62aea7cbdd29c5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CodeExample',
fields=[
('id', models.AutoField(serialize=False,
primary_key=True, verbose_name='ID', auto_created=True)),
('codetext', models.TextField()),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(serialize=False,
primary_key=True, verbose_name='ID', auto_created=True)),
('title', models.CharField(max_length=30)),
('slug', models.SlugField(max_length=30)),
('active', models.BooleanField(default=True)),
('summary', models.TextField()),
],
),
migrations.CreateModel(
name='SymbolEntity',
fields=[
('id', models.AutoField(serialize=False,
primary_key=True, verbose_name='ID', auto_created=True)),
('symbol', models.CharField(max_length=120)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='ClassMethod',
fields=[
('symbolentity_ptr', models.OneToOneField(serialize=False, primary_key=True,
parent_link=True, to='projects.SymbolEntity',
auto_created=True, on_delete=models.CASCADE)),
],
bases=('projects.symbolentity',),
),
migrations.CreateModel(
name='ClassVariable',
fields=[
('symbolentity_ptr', models.OneToOneField(serialize=False, primary_key=True,
parent_link=True, to='projects.SymbolEntity',
auto_created=True, on_delete=models.CASCADE)),
],
bases=('projects.symbolentity',),
),
migrations.CreateModel(
name='ConstructorArg',
fields=[
('symbolentity_ptr', models.OneToOneField(serialize=False, primary_key=True,
parent_link=True, to='projects.SymbolEntity',
auto_created=True, on_delete=models.CASCADE)),
('default', models.CharField(max_length=120, blank=True)),
],
bases=('projects.symbolentity',),
),
migrations.CreateModel(
name='InstanceVariable',
fields=[
('symbolentity_ptr', models.OneToOneField(serialize=False, primary_key=True,
parent_link=True, to='projects.SymbolEntity',
auto_created=True, on_delete=models.CASCADE)),
],
bases=('projects.symbolentity',),
),
migrations.CreateModel(
name='MethodArg',
fields=[
('symbolentity_ptr', models.OneToOneField(serialize=False, primary_key=True,
parent_link=True, to='projects.SymbolEntity',
auto_created=True, on_delete=models.CASCADE)),
('default', models.CharField(max_length=120, blank=True)),
('method', models.ForeignKey(to='projects.ClassMethod', on_delete=models.CASCADE)),
],
bases=('projects.symbolentity',),
),
migrations.CreateModel(
name='MethodReturn',
fields=[
('symbolentity_ptr', models.OneToOneField(serialize=False, primary_key=True,
parent_link=True, to='projects.SymbolEntity',
auto_created=True, on_delete=models.CASCADE)),
('method', models.ForeignKey(to='projects.ClassMethod', on_delete=models.CASCADE)),
],
bases=('projects.symbolentity',),
),
migrations.CreateModel(
name='ProjClass',
fields=[
('symbolentity_ptr', models.OneToOneField(serialize=False, primary_key=True,
parent_link=True, to='projects.SymbolEntity',
auto_created=True, on_delete=models.CASCADE)),
],
bases=('projects.symbolentity',),
),
migrations.CreateModel(
name='ProjModule',
fields=[
('symbolentity_ptr', models.OneToOneField(serialize=False, primary_key=True,
parent_link=True, to='projects.SymbolEntity',
auto_created=True, on_delete=models.CASCADE)),
('path', models.CharField(max_length=300)),
('project', models.ForeignKey(to='projects.Project', on_delete=models.CASCADE)),
],
bases=('projects.symbolentity',),
),
migrations.AddField(
model_name='codeexample',
name='project',
field=models.ForeignKey(to='projects.Project', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='projclass',
name='module',
field=models.ForeignKey(to='projects.ProjModule', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='instancevariable',
name='pclass',
field=models.ForeignKey(to='projects.ProjClass', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='constructorarg',
name='pclass',
field=models.ForeignKey(to='projects.ProjClass', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='classvariable',
name='pclass',
field=models.ForeignKey(to='projects.ProjClass', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='classmethod',
name='pclass',
field=models.ForeignKey(to='projects.ProjClass', on_delete=models.CASCADE),
),
]
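A minimal models.py sketch consistent with the migration above, to make the multi-table inheritance explicit: each concrete symbol type subclasses SymbolEntity, which Django realises as the OneToOneField parent_link seen in the operations. This is a reconstruction from the migration, not the project's actual source, and only a subset of the models is shown.
from django.db import models

class Project(models.Model):
    title = models.CharField(max_length=30)
    slug = models.SlugField(max_length=30)
    active = models.BooleanField(default=True)
    summary = models.TextField()

class SymbolEntity(models.Model):
    symbol = models.CharField(max_length=120)
    description = models.TextField()

class ProjModule(SymbolEntity):  # becomes the symbolentity_ptr OneToOneField in the migration
    path = models.CharField(max_length=300)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)

class ProjClass(SymbolEntity):
    module = models.ForeignKey(ProjModule, on_delete=models.CASCADE)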
| 44.953333
| 104
| 0.493697
| 503
| 6,743
| 6.469185
| 0.151093
| 0.052243
| 0.073141
| 0.109711
| 0.825753
| 0.769514
| 0.754149
| 0.754149
| 0.720344
| 0.687154
| 0
| 0.004162
| 0.394187
| 6,743
| 149
| 105
| 45.255034
| 0.792411
| 0.003114
| 0
| 0.741259
| 0
| 0
| 0.14256
| 0.05
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013986
| 0
| 0.034965
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed865758cfe794c4f7208d2ed5d8aeffced2ab18
| 37,691
|
py
|
Python
|
A_source_code/generalcode/test_allocation.py
|
vanHoek-dgnm/CARBON-DISC
|
3ecd5f4efba5e032d43679ee977064d6b25154a9
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
A_source_code/generalcode/test_allocation.py
|
vanHoek-dgnm/CARBON-DISC
|
3ecd5f4efba5e032d43679ee977064d6b25154a9
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
A_source_code/generalcode/test_allocation.py
|
vanHoek-dgnm/CARBON-DISC
|
3ecd5f4efba5e032d43679ee977064d6b25154a9
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ******************************************************
## Revision "$LastChangedDate: 2018-06-01 15:05:44 +0200 (Fri, 01 Jun 2018) $"
## Date "$LastChangedRevision: 1 $"
## Author "$LastChangedBy: arthurbeusen $"
## URL "$HeadURL: https://pbl.sliksvn.com/generalcode/test_allocation.py $"
# ******************************************************
'''
Test script to test the functionality of allocation functions.
'''
import os
import sys
__general = os.path.join(os.getcwd(), 'trunk')
if os.path.exists(__general):
sys.path.insert(0, __general)
print(__general + " is added to the python search path for modules.")
import allocranking
import allocweighing
import ascraster  # used by the grid multiply/add/divide tests below
#def allocranking(sq,sw,wReg,qmaxReg):
#===================================================================================
#INPUT (All input is changed during this function)
# sq Regional sum of values of the variable that must be allocated
# sw Regional sum of values of weighing factor
# wReg Weighting factor grid map
# qmaxReg Maximum value of allocation variable per grid cell
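# A minimal reference sketch of what allocranking appears to compute, inferred
# only from the expected values in the tests below: fill cells in descending-
# weight order (ties resolved toward the higher index, which is what Test 2
# implies), cap each cell at qmaxReg, and stop once sq is used up. The real
# allocranking module may well differ.
def _allocranking_sketch(sq, sw, wReg, qmaxReg):
    # sw is unused here; kept for signature parity with allocranking.
    q = [0.0] * len(wReg)
    remaining = sq
    for i in sorted(range(len(wReg)), key=lambda k: (wReg[k], k), reverse=True):
        q[i] = min(qmaxReg[i], remaining)
        remaining -= q[i]
        if remaining <= 0.0:
            break
    return q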
# Test 1
sq = 100.
wReg = [1.,1.,2.,1.5]
sw = sum(wReg)
qmaxReg = [10.,10.,75.,50.]
qReg = allocranking.allocranking(sq,sw,wReg,qmaxReg)
qReg_exp = [0.0,0.0,75.0,25.0]
ltest1 = 1
if (qReg != qReg_exp):
ltest1 = 0
print("Test 1 is not a succes. Values found: " + str(qReg) + " and " + str(qReg_exp))
if (ltest1 == 1):
print("Test1 passed.")
else:
print("Allocation ranking method")
print(qReg)
print(qReg_exp)
# Test 2
sq = 100.
wReg = [1.,1.,2.,1.]
sw = sum(wReg)
qmaxReg = [100.,20.,10.,50.]
qReg = allocranking.allocranking(sq,sw,wReg,qmaxReg)
qReg_exp = [20.0,20.0,10.0,50.0]
ltest1 = 1
if (qReg != qReg_exp):
ltest1 = 0
print("Test 2 is not a succes. Values found: " + str(qReg) + " and " + str(qReg_exp))
if (ltest1 == 1):
print("Test2 passed.")
else:
print("Allocation ranking method 2")
print(qReg)
print(qReg_exp)
# Test 3
sq = 100.
wReg = [1.,1.,2.,1.]
sw = sum(wReg)
qmaxReg = [1.,0.,0.,0.]
qReg = allocranking.allocranking(sq,sw,wReg,qmaxReg)
qReg_exp = [1.0,0.0,0.0,0.0]
ltest1 = 1
if (qReg != qReg_exp):
ltest1 = 0
print("Test 3 is not a succes. Values found: " + str(qReg) + " and " + str(qReg_exp))
if (ltest1 == 1):
print("Test3 passed.")
else:
print("Allocation ranking method 3")
print(qReg)
print(qReg_exp)
# Testing allocweighing
print("Start allocation weighing method testing.")
# Test 1
sq = 100.
wReg = [1.,1.,2.,6]
sw = sum(wReg)
qmaxReg = [100.,100.,100.,100.]
qReg = allocweighing.allocweighing(sq,sw,wReg,qmaxReg)
qReg_exp = [10.0,10.0,20.0,60.0]
ltest1 = 1
if (qReg != qReg_exp):
ltest1 = 0
print("Test 1 is not a succes. Values found: " + str(qReg) + " and " + str(qReg_exp))
if (ltest1 == 1):
print("Test1 passed.")
else:
print("Allocation weighing method")
print(qReg)
print(qReg_exp)
# Test 2
sq = 100.
wReg = [1.,1.,2.,6]
sw = sum(wReg)
qmaxReg = [100.,100.,100.,50.]
qReg = allocweighing.allocweighing(sq,sw,wReg,qmaxReg)
qReg_exp = [12.5,12.5,25.0,50.0]
ltest1 = 1
if (qReg != qReg_exp):
ltest1 = 0
print("Test 2 is not a succes. Values found: " + str(qReg) + " and " + str(qReg_exp))
if (ltest1 == 1):
print("Test2 passed.")
else:
print("Allocation weighing method 2")
print(qReg)
print(qReg_exp)
# Test 3
sq = 80.
wReg = [0,0,2.,6]
sw = sum(wReg)
qmaxReg = [100.,100.,100.,50.]
qReg = allocweighing.allocweighing(sq,sw,wReg,qmaxReg)
qReg_exp = [0,0,30.0,50.0]
ltest1 = 1
if (qReg != qReg_exp):
ltest1 = 0
print("Test 3 is not a succes. Values found: " + str(qReg) + " and " + str(qReg_exp))
if (ltest1 == 1):
print("Test3 passed.")
else:
print("Allocation weighing method 3")
print(qReg)
print(qReg_exp)
# sys.exit(12)  # an early exit here would skip all of the grid tests below
# First three tests are on a grid with no nodata
# Test 1
# Multiply grid1 with a scalar of type integer
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
factor = 1
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 1 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test1 passed.")
else:
print("Multiplying with factor 1")
print(grid1_old.values)
print(grid1.values)
# Test 2
# Multiply grid1 with a scalar of type float
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
factor = 1.0
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 2 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test2 passed.")
else:
print("Multiplying with factor 1.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 3
# Multiply grid1 with a grid of one
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
factor = 1.0
grid1_old = ascraster.duplicategrid(grid1)
gridone = ascraster.duplicategrid(grid1)
# Make all grid entries one.
for i in range(gridone.length):
gridone.set_data(i,1.0)
grid1.multiply(gridone)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 3 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test3 passed.")
else:
print("Multiplying with another grid with 1.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# First three tests are on a grid with no nodata. Now with grid with no nodata but in the header the nodata_value specified.
# Test 4
# Multiply grid1 with a scalar of type integer
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
factor = 1
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 4 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test4 passed.")
else:
print("Multiplying with factor 1")
print(grid1_old.values)
print(grid1.values)
# Test 5
# Multiply grid1 with a scalar of type float
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
factor = 1.0
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 5 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test5 passed.")
else:
print("Multiplying with factor 1.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 6
# Multiply grid1 with a grid of one
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
factor = 1.0
grid1_old = ascraster.duplicategrid(grid1)
gridone = ascraster.duplicategrid(grid1)
# Make all grid entries one.
for i in range(gridone.length):
gridone.set_data(i,1.0)
grid1.multiply(gridone)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 6 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test6 passed.")
else:
print("Multiplying with another grid with 1.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# First six tests are on a grid with no nodata. Now with grid with nodata.
# Test 7
# Multiply grid1 with a scalar of type integer
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
factor = 1
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 7 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test7 passed.")
else:
print("Multiplying with factor 1")
print(grid1_old.values)
print(grid1.values)
# Test 8
# Multiply grid1 with a scalar of type float
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
factor = 1.0
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 8 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test8 passed.")
else:
print("Multiplying with factor 1.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 9
# Multiply grid1 with a grid of one
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
factor = 1.0
grid1_old = ascraster.duplicategrid(grid1)
gridone = ascraster.duplicategrid(grid1)
# Make all grid entries one.
for i in range(gridone.length):
gridone.set_data(i,1.0)
grid1.multiply(gridone)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 9 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test9 passed.")
else:
print("Multiplying with another grid with 1.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 10
# Multiply grid1 with nodata with a grid with no nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(gridone)
# Grid1 must have the multiplication of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1*val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 10 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test10 passed.")
else:
print("Multiplying with another grid with 1.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 11
# Multiply grid1 with nodata with a grid with no nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(gridone)
# Grid1 must have the multiplication of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1*val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 11 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test11 passed.")
else:
print("Multiplying with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 12
# Multiply grid1 with nodata with another grid with nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(gridone)
# Grid1 must have the multiplication of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1*val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 12 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test12 passed.")
else:
print("Multiplying with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 13
# Multiply grid1 with another grid, nodata_value only in the headers
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(gridone)
# Grid1 must have the multiplication of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1*val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 13 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test13 passed.")
else:
print("Multiplying with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 14
# Multiply grid1 with another grid, neither containing nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(gridone)
# Grid1 must have the multiplication of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1*val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 14 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test14 passed.")
else:
print("Multiplying with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 15
# Multiply grid1 without nodata with a grid with nodata_value in the header
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(gridone)
# Grid1 must have the multiplication of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1*val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 15 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test15 passed.")
else:
print("Multiplying with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Now tests for the summation of two objects (grid or scalar).
# First three tests are on a grid with no nodata
# Test 1
# Add grid1 with a scalar of type integer
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
factor = 0
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 1 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test1 passed.")
else:
print("Sum with factor 0")
print(grid1_old.values)
print(grid1.values)
# Test 2
# Add grid1 with a scalar of type float
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
factor = 0.0
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 2 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test2 passed.")
else:
print("Sum with factor 0.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 3
# Add grid1 with a grid of one
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
factor = 0.0
grid1_old = ascraster.duplicategrid(grid1)
gridone = ascraster.duplicategrid(grid1)
# Make all grid entries zero.
for i in range(gridone.length):
gridone.set_data(i,0.0)
grid1.add(gridone)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 3 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test3 passed.")
else:
print("Adding with another grid with 0.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# First three tests are on a grid with no nodata. Now with grid with no nodata but in the header the nodata_value specified.
# Test 4
# Add grid1 with a scalar of type integer
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
factor = 0
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 4 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test4 passed.")
else:
print("Sum with factor 0")
print(grid1_old.values)
print(grid1.values)
# Test 5
# Add grid1 with a scalar of type float
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
factor = 0.0
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 5 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test5 passed.")
else:
print("Sum with factor 0.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 6
# Sum grid1 with a grid of zeros
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
factor = 0.0
grid1_old = ascraster.duplicategrid(grid1)
gridone = ascraster.duplicategrid(grid1)
# Make all grid entries zero.
for i in range(gridone.length):
gridone.set_data(i,0.0)
grid1.add(gridone)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 6 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test6 passed.")
else:
print("Multiplying with another grid with 1.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# First six tests are on a grid with no nodata. Now with grid with nodata.
# Test 7
# Sum grid1 with a scalar of type integer
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
factor = 0
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 7 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test7 passed.")
else:
print("Sum with factor 0")
print(grid1_old.values)
print(grid1.values)
# Test 8
# Sum grid1 with a scalar of type float
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
factor = 0.0
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 8 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test8 passed.")
else:
print("Summing with factor 0.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 9
# Sum grid1 with a grid of zeros
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
factor = 0.0
grid1_old = ascraster.duplicategrid(grid1)
gridone = ascraster.duplicategrid(grid1)
# Make all grid entries zero.
for i in range(gridone.length):
gridone.set_data(i,0.0)
grid1.add(gridone)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 9 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test9 passed.")
else:
print("Sum with another grid with 0.0. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 10
# Add grid1 with nodata with a grid with no nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1+val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 10 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test10 passed.")
else:
print("Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 11
# Add grid1 with nodata with a grid with no nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1+val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 11 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test11 passed.")
else:
print("Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 12
# Add grid1 with nodata with another grid with nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1+val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 12 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test12 passed.")
else:
print("Summing with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 13
# Sum grid1 with another grid, nodata_value only in the headers
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1+val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 13 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test13 passed.")
else:
print("Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 14
# Add grid1 with another grid, neither containing nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1+val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 14 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test14 passed.")
else:
print("Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 15
# Sum grid1 without nodata with a grid with nodata_value in the header
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1+val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 15 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test15 passed.")
else:
print("Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Check range checkers
# Test 1
# Sum grid1 without nodata with a grid with nodata_value in the header
# Read ascii grid
xmin=4
xmax=7
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid2.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone,minimum= xmin, maximum=xmax)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
val3 = val1+val2
if (val3 < xmin): val3 = xmin
if (val3 > xmax): val3 = xmax
grid1_old.set_data(i,val3)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 1 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test1 passed.")
else:
print("Range checker. Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 2
# Sum grid1 with nodata with a grid with nodata
# Read ascii grid
xmin=4
xmax=7
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid4.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone,minimum= xmin, maximum=xmax)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
val3 = val1+val2
if (val3 < xmin): val3 = xmin
if (val3 > xmax): val3 = xmax
grid1_old.set_data(i,val3)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 2 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test2 passed.")
else:
print("Range checker. Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 3
# Sum grid1 with nodata with a grid with nodata
# Read ascii grid
xmin=4
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid4.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone,minimum= xmin)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
val3 = val1+val2
if (val3 < xmin): val3 = xmin
grid1_old.set_data(i,val3)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 3 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test3 passed.")
else:
print("Range checker. Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 4
# Sum grid1 with nodata with a grid with nodata
# Read ascii grid
xmax=7
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid4.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.add(gridone, maximum=xmax)
# Grid1 must have the sum of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
val3 = val1+val2
if (val3 > xmax): val3 = xmax
grid1_old.set_data(i,val3)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 4 is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test4 passed.")
else:
print("Range checker. Sum with another grid. Changing int grid into float")
print(grid1_old.values)
print(grid1.values)
# Test 1 with divide
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
factor = 1
grid1_old = ascraster.duplicategrid(grid1)
grid1.divide(factor)
# Grid1 and grid1_old must be the same
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 1 divide is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Divide Test1 passed.")
else:
print("Divide with factor 1")
print(grid1_old.values)
print(grid1.values)
# Test 2 with divide
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
factor = ascraster.duplicategrid(grid1)
grid1.divide(factor)
# All data cells of grid1 must now be 1.0
ltest1 = 1
for i in range(grid1.length):
if (grid1.values[i] != 1.0):
ltest1 = 0
print('Test 2 divide is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " Must be 1.0 ")
if (ltest1 == 1):
print("Divide Test2 passed.")
else:
print("Divide with itself")
print(grid1.values)
# Test 3 with divide
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
factor = ascraster.duplicategrid(grid1)
grid1.divide(factor)
# All data cells of grid1 must now be 1.0
ltest1 = 1
for i in range(grid1.length):
val = grid1.get_data(i)
if (val != None):
if (grid1.values[i] != 1.0):
ltest1 = 0
print('Test 3 divide is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " Must be 1.0 ")
if (ltest1 == 1):
print("Divide Test3 passed.")
else:
print("Divide with itself")
print(grid1.values)
# Test 4 with divide
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
grid2 = ascraster.Asciigrid(ascii_file='testgrid5.asc',numtype=int)
grid1.divide(grid2,default_nodata_value=-12)
ltest1 = 1
if (grid1.nodata_value != -12):
print('Test 4 divide is not a success. Setting of nodata went wrong.')
ltest1 = 0
for i in range(grid1.length):
val = grid1.get_data(i)
if (val != None):
if (grid1.values[i] != 1.0):
ltest1 = 0
print('Test 4 divide is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " Must be 1.0 ")
if (ltest1 == 1):
print("Divide Test4 passed.")
else:
print("Divide with itself")
print(grid1.values)
# Test 5 with divide
grid1 = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
grid2 = ascraster.Asciigrid(ascii_file='testgrid6.asc',numtype=int)
grid1.divide(grid2,default_nodata_value=-12)
ltest1 = 1
if (grid1.nodata_value != -12):
print('Test 5 divide is not a success. Setting of nodata went wrong.')
ltest1 = 0
for i in range(grid1.length):
val = grid1.get_data(i)
if (val != None):
if (grid1.values[i] != 1.0):
ltest1 = 0
print('Test 5 divide is not a success for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " Must be 1.0 ")
if (ltest1 == 1):
print("Divide Test5 passed.")
else:
print("Divide with itself")
print(grid1.values)
| 33.864331
| 147
| 0.675201
| 5,898
| 37,691
| 4.236351
| 0.036962
| 0.088369
| 0.058833
| 0.042024
| 0.966741
| 0.961979
| 0.959938
| 0.956656
| 0.955455
| 0.949332
| 0
| 0.050118
| 0.189515
| 37,691
| 1,112
| 148
| 33.894784
| 0.767808
| 0.165345
| 0
| 0.940547
| 0
| 0
| 0.193991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.053508
| 0.004756
| 0
| 0.004756
| 0.267539
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
9c090de9560b5ee09cf70cc743cc5efe66d2e8dd
| 114,284
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_router_bgp.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_router_bgp.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 32
|
2018-10-09T04:13:42.000Z
|
2020-05-11T07:20:28.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_router_bgp.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 11
|
2018-10-09T00:14:53.000Z
|
2021-11-03T10:54:09.000Z
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
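# A minimal sketch of the direct fortiosapi calls this module wraps, assuming
# the library's login/set/logout interface; the host, credentials, and data
# payload are placeholders, and the exact signatures should be checked against
# the installed fortiosapi version:
#
#   from fortiosapi import FortiOSAPI
#   fos = FortiOSAPI()
#   fos.login('192.0.2.1', 'admin', 'password')
#   fos.set('router', 'bgp', data={'as': 65001, 'router-id': '10.0.0.1'}, vdom='root')
#   fos.logout()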
DOCUMENTATION = '''
---
module: fortios_router_bgp
short_description: Configure BGP in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify the router feature and bgp category.
Examples include all parameters; values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
router_bgp:
description:
- Configure BGP.
default: null
type: dict
suboptions:
admin_distance:
description:
- Administrative distance modifications.
type: list
suboptions:
distance:
description:
- Administrative distance to apply (1 - 255).
type: int
id:
description:
- ID.
required: true
type: int
neighbour_prefix:
description:
- Neighbor address prefix.
type: str
route_list:
description:
- Access list of routes to apply new distance to. Source router.access-list.name.
type: str
aggregate_address:
description:
- BGP aggregate address table.
type: list
suboptions:
as_set:
description:
- Enable/disable generate AS set path information.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix:
description:
- Aggregate prefix.
type: str
summary_only:
description:
- Enable/disable filter more specific routes from updates.
type: str
choices:
- enable
- disable
aggregate_address6:
description:
- BGP IPv6 aggregate address table.
type: list
suboptions:
as_set:
description:
- Enable/disable generate AS set path information.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix6:
description:
- Aggregate IPv6 prefix.
type: str
summary_only:
description:
- Enable/disable filter more specific routes from updates.
type: str
choices:
- enable
- disable
always_compare_med:
description:
- Enable/disable always compare MED.
type: str
choices:
- enable
- disable
as:
description:
- Router AS number, valid from 1 to 4294967295, 0 to disable BGP.
type: int
bestpath_as_path_ignore:
description:
- Enable/disable ignore AS path.
type: str
choices:
- enable
- disable
bestpath_cmp_confed_aspath:
description:
- Enable/disable compare federation AS path length.
type: str
choices:
- enable
- disable
bestpath_cmp_routerid:
description:
- Enable/disable compare router ID for identical EBGP paths.
type: str
choices:
- enable
- disable
bestpath_med_confed:
description:
- Enable/disable compare MED among confederation paths.
type: str
choices:
- enable
- disable
bestpath_med_missing_as_worst:
description:
- Enable/disable treat missing MED as least preferred.
type: str
choices:
- enable
- disable
client_to_client_reflection:
description:
- Enable/disable client-to-client route reflection.
type: str
choices:
- enable
- disable
cluster_id:
description:
- Route reflector cluster ID.
type: str
confederation_identifier:
description:
- Confederation identifier.
type: int
confederation_peers:
description:
- Confederation peers.
type: list
suboptions:
peer:
description:
- Peer ID.
required: true
type: str
dampening:
description:
- Enable/disable route-flap dampening.
type: str
choices:
- enable
- disable
dampening_max_suppress_time:
description:
- Maximum minutes a route can be suppressed.
type: int
dampening_reachability_half_life:
description:
- Reachability half-life time for penalty (min).
type: int
dampening_reuse:
description:
- Threshold to reuse routes.
type: int
dampening_route_map:
description:
- Criteria for dampening. Source router.route-map.name.
type: str
dampening_suppress:
description:
- Threshold to suppress routes.
type: int
dampening_unreachability_half_life:
description:
- Unreachability half-life time for penalty (min).
type: int
default_local_preference:
description:
- Default local preference.
type: int
deterministic_med:
description:
- Enable/disable enforce deterministic comparison of MED.
type: str
choices:
- enable
- disable
distance_external:
description:
- Distance for routes external to the AS.
type: int
distance_internal:
description:
- Distance for routes internal to the AS.
type: int
distance_local:
description:
- Distance for routes local to the AS.
type: int
ebgp_multipath:
description:
- Enable/disable EBGP multi-path.
type: str
choices:
- enable
- disable
enforce_first_as:
description:
- Enable/disable enforce first AS for EBGP routes.
type: str
choices:
- enable
- disable
fast_external_failover:
description:
- Enable/disable reset peer BGP session if link goes down.
type: str
choices:
- enable
- disable
graceful_end_on_timer:
description:
- Enable/disable to exit graceful restart on timer only.
type: str
choices:
- enable
- disable
graceful_restart:
description:
- Enable/disable BGP graceful restart capabilities.
type: str
choices:
- enable
- disable
graceful_restart_time:
description:
- Time needed for neighbors to restart (sec).
type: int
graceful_stalepath_time:
description:
- Time to hold stale paths of restarting neighbor (sec).
type: int
graceful_update_delay:
description:
- Route advertisement/selection delay after restart (sec).
type: int
holdtime_timer:
description:
- Number of seconds to mark peer as dead.
type: int
ibgp_multipath:
description:
- Enable/disable IBGP multi-path.
type: str
choices:
- enable
- disable
ignore_optional_capability:
description:
- Don't send unknown optional capability notification message
type: str
choices:
- enable
- disable
keepalive_timer:
description:
- Frequency to send keep alive requests.
type: int
log_neighbour_changes:
description:
- Enable logging of BGP neighbour's changes
type: str
choices:
- enable
- disable
neighbor:
description:
- BGP neighbor table.
type: list
suboptions:
activate:
description:
- Enable/disable address family IPv4 for this neighbor.
type: str
choices:
- enable
- disable
activate6:
description:
- Enable/disable address family IPv6 for this neighbor.
type: str
choices:
- enable
- disable
advertisement_interval:
description:
- Minimum interval (sec) between sending updates.
type: int
allowas_in:
description:
- Maximum number of occurrences of my own AS number allowed in received IPv4 routes.
type: int
allowas_in_enable:
description:
- Enable/disable allowing my own AS number in the AS path (IPv4).
type: str
choices:
- enable
- disable
allowas_in_enable6:
description:
- Enable/disable allowing my own AS number in the AS path (IPv6).
type: str
choices:
- enable
- disable
allowas_in6:
description:
- Maximum number of occurrences of my own AS number allowed in received IPv6 routes.
type: int
as_override:
description:
- Enable/disable replace peer AS with own AS for IPv4.
type: str
choices:
- enable
- disable
as_override6:
description:
- Enable/disable replace peer AS with own AS for IPv6.
type: str
choices:
- enable
- disable
attribute_unchanged:
description:
- IPv4 List of attributes that should be unchanged.
type: str
choices:
- as-path
- med
- next-hop
attribute_unchanged6:
description:
- IPv6 List of attributes that should be unchanged.
type: str
choices:
- as-path
- med
- next-hop
bfd:
description:
- Enable/disable BFD for this neighbor.
type: str
choices:
- enable
- disable
capability_default_originate:
description:
- Enable/disable advertise default IPv4 route to this neighbor.
type: str
choices:
- enable
- disable
capability_default_originate6:
description:
- Enable/disable advertise default IPv6 route to this neighbor.
type: str
choices:
- enable
- disable
capability_dynamic:
description:
- Enable/disable advertise dynamic capability to this neighbor.
type: str
choices:
- enable
- disable
capability_graceful_restart:
description:
- Enable/disable advertise IPv4 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_graceful_restart6:
description:
- Enable/disable advertise IPv6 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_orf:
description:
- Accept/Send IPv4 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_orf6:
description:
- Accept/Send IPv6 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_route_refresh:
description:
- Enable/disable advertise route refresh capability to this neighbor.
type: str
choices:
- enable
- disable
conditional_advertise:
description:
- Conditional advertisement.
type: list
suboptions:
advertise_routemap:
description:
- Name of advertising route map. Source router.route-map.name.
type: str
condition_routemap:
description:
- Name of condition route map. Source router.route-map.name.
type: str
condition_type:
description:
- Type of condition.
type: str
choices:
- exist
- non-exist
connect_timer:
description:
- Interval (sec) for connect timer.
type: int
default_originate_routemap:
description:
- Route map to specify criteria to originate IPv4 default. Source router.route-map.name.
type: str
default_originate_routemap6:
description:
- Route map to specify criteria to originate IPv6 default. Source router.route-map.name.
type: str
description:
description:
- Description.
type: str
distribute_list_in:
description:
- Filter for IPv4 updates from this neighbor. Source router.access-list.name.
type: str
distribute_list_in6:
description:
- Filter for IPv6 updates from this neighbor. Source router.access-list6.name.
type: str
distribute_list_out:
description:
- Filter for IPv4 updates to this neighbor. Source router.access-list.name.
type: str
distribute_list_out6:
description:
- Filter for IPv6 updates to this neighbor. Source router.access-list6.name.
type: str
dont_capability_negotiate:
description:
- Don't negotiate capabilities with this neighbor
type: str
choices:
- enable
- disable
ebgp_enforce_multihop:
description:
- Enable/disable allow multi-hop EBGP neighbors.
type: str
choices:
- enable
- disable
ebgp_multihop_ttl:
description:
- EBGP multihop TTL for this peer.
type: int
filter_list_in:
description:
- BGP filter for IPv4 inbound routes. Source router.aspath-list.name.
type: str
filter_list_in6:
description:
- BGP filter for IPv6 inbound routes. Source router.aspath-list.name.
type: str
filter_list_out:
description:
- BGP filter for IPv4 outbound routes. Source router.aspath-list.name.
type: str
filter_list_out6:
description:
- BGP filter for IPv6 outbound routes. Source router.aspath-list.name.
type: str
holdtime_timer:
description:
- Interval (sec) before peer considered dead.
type: int
interface:
description:
- Interface Source system.interface.name.
type: str
ip:
description:
- IP/IPv6 address of neighbor.
required: true
type: str
keep_alive_timer:
description:
- Keep alive timer interval (sec).
type: int
link_down_failover:
description:
- Enable/disable failover upon link down.
type: str
choices:
- enable
- disable
local_as:
description:
- Local AS number of neighbor.
type: int
local_as_no_prepend:
description:
- Do not prepend local-as to incoming updates.
type: str
choices:
- enable
- disable
local_as_replace_as:
description:
- Replace real AS with local-as in outgoing updates.
type: str
choices:
- enable
- disable
maximum_prefix:
description:
- Maximum number of IPv4 prefixes to accept from this peer.
type: int
maximum_prefix_threshold:
description:
- Maximum IPv4 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_threshold6:
description:
- Maximum IPv6 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_warning_only:
description:
- Enable/disable only giving a warning message when the IPv4 prefix limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix_warning_only6:
description:
- Enable/disable only giving a warning message when the IPv6 prefix limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix6:
description:
- Maximum number of IPv6 prefixes to accept from this peer.
type: int
next_hop_self:
description:
- Enable/disable IPv4 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
next_hop_self6:
description:
- Enable/disable IPv6 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
override_capability:
description:
- Enable/disable override result of capability negotiation.
type: str
choices:
- enable
- disable
passive:
description:
- Enable/disable sending of open messages to this neighbor.
type: str
choices:
- enable
- disable
password:
description:
- Password used in MD5 authentication.
type: str
prefix_list_in:
description:
- IPv4 Inbound filter for updates from this neighbor. Source router.prefix-list.name.
type: str
prefix_list_in6:
description:
- IPv6 Inbound filter for updates from this neighbor. Source router.prefix-list6.name.
type: str
prefix_list_out:
description:
- IPv4 Outbound filter for updates to this neighbor. Source router.prefix-list.name.
type: str
prefix_list_out6:
description:
- IPv6 Outbound filter for updates to this neighbor. Source router.prefix-list6.name.
type: str
remote_as:
description:
- AS number of neighbor.
type: int
remove_private_as:
description:
- Enable/disable remove private AS number from IPv4 outbound updates.
type: str
choices:
- enable
- disable
remove_private_as6:
description:
- Enable/disable remove private AS number from IPv6 outbound updates.
type: str
choices:
- enable
- disable
restart_time:
description:
- Graceful restart delay time (sec, 0 = global default).
type: int
retain_stale_time:
description:
- Time to retain stale routes.
type: int
route_map_in:
description:
- IPv4 Inbound route map filter. Source router.route-map.name.
type: str
route_map_in6:
description:
- IPv6 Inbound route map filter. Source router.route-map.name.
type: str
route_map_out:
description:
- IPv4 Outbound route map filter. Source router.route-map.name.
type: str
route_map_out6:
description:
- IPv6 Outbound route map filter. Source router.route-map.name.
type: str
route_reflector_client:
description:
- Enable/disable IPv4 AS route reflector client.
type: str
choices:
- enable
- disable
route_reflector_client6:
description:
- Enable/disable IPv6 AS route reflector client.
type: str
choices:
- enable
- disable
route_server_client:
description:
- Enable/disable IPv4 AS route server client.
type: str
choices:
- enable
- disable
route_server_client6:
description:
- Enable/disable IPv6 AS route server client.
type: str
choices:
- enable
- disable
send_community:
description:
- IPv4 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
send_community6:
description:
- IPv6 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
shutdown:
description:
- Enable/disable shutdown this neighbor.
type: str
choices:
- enable
- disable
soft_reconfiguration:
description:
- Enable/disable allow IPv4 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
soft_reconfiguration6:
description:
- Enable/disable allow IPv6 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
stale_route:
description:
- Enable/disable stale route after neighbor down.
type: str
choices:
- enable
- disable
strict_capability_match:
description:
- Enable/disable strict capability matching.
type: str
choices:
- enable
- disable
unsuppress_map:
description:
- IPv4 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
unsuppress_map6:
description:
- IPv6 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
update_source:
description:
- Interface to use as source IP/IPv6 address of TCP connections. Source system.interface.name.
type: str
weight:
description:
- Neighbor weight.
type: int
neighbor_group:
description:
- BGP neighbor group table.
type: list
suboptions:
activate:
description:
- Enable/disable address family IPv4 for this neighbor.
type: str
choices:
- enable
- disable
activate6:
description:
- Enable/disable address family IPv6 for this neighbor.
type: str
choices:
- enable
- disable
advertisement_interval:
description:
- Minimum interval (sec) between sending updates.
type: int
allowas_in:
description:
- Maximum number of occurrences of my AS number allowed (IPv4).
type: int
allowas_in_enable:
description:
- Enable/disable allowing my AS in the AS path (IPv4).
type: str
choices:
- enable
- disable
allowas_in_enable6:
description:
- Enable/disable allowing my AS in the AS path (IPv6).
type: str
choices:
- enable
- disable
allowas_in6:
description:
- Maximum number of occurrences of my AS number allowed (IPv6).
type: int
as_override:
description:
- Enable/disable replace peer AS with own AS for IPv4.
type: str
choices:
- enable
- disable
as_override6:
description:
- Enable/disable replace peer AS with own AS for IPv6.
type: str
choices:
- enable
- disable
attribute_unchanged:
description:
- List of IPv4 attributes that should be unchanged.
type: str
choices:
- as-path
- med
- next-hop
attribute_unchanged6:
description:
- List of IPv6 attributes that should be unchanged.
type: str
choices:
- as-path
- med
- next-hop
bfd:
description:
- Enable/disable BFD for this neighbor.
type: str
choices:
- enable
- disable
capability_default_originate:
description:
- Enable/disable advertise default IPv4 route to this neighbor.
type: str
choices:
- enable
- disable
capability_default_originate6:
description:
- Enable/disable advertise default IPv6 route to this neighbor.
type: str
choices:
- enable
- disable
capability_dynamic:
description:
- Enable/disable advertise dynamic capability to this neighbor.
type: str
choices:
- enable
- disable
capability_graceful_restart:
description:
- Enable/disable advertise IPv4 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_graceful_restart6:
description:
- Enable/disable advertise IPv6 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_orf:
description:
- Accept/Send IPv4 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_orf6:
description:
- Accept/Send IPv6 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_route_refresh:
description:
- Enable/disable advertise route refresh capability to this neighbor.
type: str
choices:
- enable
- disable
connect_timer:
description:
- Interval (sec) for connect timer.
type: int
default_originate_routemap:
description:
- Route map to specify criteria to originate IPv4 default. Source router.route-map.name.
type: str
default_originate_routemap6:
description:
- Route map to specify criteria to originate IPv6 default. Source router.route-map.name.
type: str
description:
description:
- Description.
type: str
distribute_list_in:
description:
- Filter for IPv4 updates from this neighbor. Source router.access-list.name.
type: str
distribute_list_in6:
description:
- Filter for IPv6 updates from this neighbor. Source router.access-list6.name.
type: str
distribute_list_out:
description:
- Filter for IPv4 updates to this neighbor. Source router.access-list.name.
type: str
distribute_list_out6:
description:
- Filter for IPv6 updates to this neighbor. Source router.access-list6.name.
type: str
dont_capability_negotiate:
description:
- Don't negotiate capabilities with this neighbor.
type: str
choices:
- enable
- disable
ebgp_enforce_multihop:
description:
- Enable/disable allow multi-hop EBGP neighbors.
type: str
choices:
- enable
- disable
ebgp_multihop_ttl:
description:
- EBGP multihop TTL for this peer.
type: int
filter_list_in:
description:
- BGP filter for IPv4 inbound routes. Source router.aspath-list.name.
type: str
filter_list_in6:
description:
- BGP filter for IPv6 inbound routes. Source router.aspath-list.name.
type: str
filter_list_out:
description:
- BGP filter for IPv4 outbound routes. Source router.aspath-list.name.
type: str
filter_list_out6:
description:
- BGP filter for IPv6 outbound routes. Source router.aspath-list.name.
type: str
holdtime_timer:
description:
- Interval (sec) before peer considered dead.
type: int
interface:
description:
- Interface. Source system.interface.name.
type: str
keep_alive_timer:
description:
- Keep alive timer interval (sec).
type: int
link_down_failover:
description:
- Enable/disable failover upon link down.
type: str
choices:
- enable
- disable
local_as:
description:
- Local AS number of neighbor.
type: int
local_as_no_prepend:
description:
- Do not prepend local-as to incoming updates.
type: str
choices:
- enable
- disable
local_as_replace_as:
description:
- Replace real AS with local-as in outgoing updates.
type: str
choices:
- enable
- disable
maximum_prefix:
description:
- Maximum number of IPv4 prefixes to accept from this peer.
type: int
maximum_prefix_threshold:
description:
- Maximum IPv4 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_threshold6:
description:
- Maximum IPv6 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_warning_only:
description:
- Enable/disable only giving a warning message when the IPv4 prefix limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix_warning_only6:
description:
- Enable/disable only giving a warning message when the IPv6 prefix limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix6:
description:
- Maximum number of IPv6 prefixes to accept from this peer.
type: int
name:
description:
- Neighbor group name.
required: true
type: str
next_hop_self:
description:
- Enable/disable IPv4 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
next_hop_self6:
description:
- Enable/disable IPv6 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
override_capability:
description:
- Enable/disable override result of capability negotiation.
type: str
choices:
- enable
- disable
passive:
description:
- Enable/disable sending of open messages to this neighbor.
type: str
choices:
- enable
- disable
prefix_list_in:
description:
- IPv4 Inbound filter for updates from this neighbor. Source router.prefix-list.name.
type: str
prefix_list_in6:
description:
- IPv6 Inbound filter for updates from this neighbor. Source router.prefix-list6.name.
type: str
prefix_list_out:
description:
- IPv4 Outbound filter for updates to this neighbor. Source router.prefix-list.name.
type: str
prefix_list_out6:
description:
- IPv6 Outbound filter for updates to this neighbor. Source router.prefix-list6.name.
type: str
remote_as:
description:
- AS number of neighbor.
type: int
remove_private_as:
description:
- Enable/disable remove private AS number from IPv4 outbound updates.
type: str
choices:
- enable
- disable
remove_private_as6:
description:
- Enable/disable remove private AS number from IPv6 outbound updates.
type: str
choices:
- enable
- disable
restart_time:
description:
- Graceful restart delay time (sec, 0 = global default).
type: int
retain_stale_time:
description:
- Time to retain stale routes.
type: int
route_map_in:
description:
- IPv4 Inbound route map filter. Source router.route-map.name.
type: str
route_map_in6:
description:
- IPv6 Inbound route map filter. Source router.route-map.name.
type: str
route_map_out:
description:
- IPv4 Outbound route map filter. Source router.route-map.name.
type: str
route_map_out6:
description:
- IPv6 Outbound route map filter. Source router.route-map.name.
type: str
route_reflector_client:
description:
- Enable/disable IPv4 AS route reflector client.
type: str
choices:
- enable
- disable
route_reflector_client6:
description:
- Enable/disable IPv6 AS route reflector client.
type: str
choices:
- enable
- disable
route_server_client:
description:
- Enable/disable IPv4 AS route server client.
type: str
choices:
- enable
- disable
route_server_client6:
description:
- Enable/disable IPv6 AS route server client.
type: str
choices:
- enable
- disable
send_community:
description:
- IPv4 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
send_community6:
description:
- IPv6 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
shutdown:
description:
- Enable/disable shutdown this neighbor.
type: str
choices:
- enable
- disable
soft_reconfiguration:
description:
- Enable/disable allow IPv4 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
soft_reconfiguration6:
description:
- Enable/disable allow IPv6 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
stale_route:
description:
- Enable/disable stale route after neighbor down.
type: str
choices:
- enable
- disable
strict_capability_match:
description:
- Enable/disable strict capability matching.
type: str
choices:
- enable
- disable
unsuppress_map:
description:
- IPv4 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
unsuppress_map6:
description:
- IPv6 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
update_source:
description:
- Interface to use as source IP/IPv6 address of TCP connections. Source system.interface.name.
type: str
weight:
description:
- Neighbor weight.
type: int
neighbor_range:
description:
- BGP neighbor range table.
type: list
suboptions:
id:
description:
- Neighbor range ID.
required: true
type: int
max_neighbor_num:
description:
- Maximum number of neighbors.
type: int
neighbor_group:
description:
- Neighbor group name. Source router.bgp.neighbor-group.name.
type: str
prefix:
description:
- Neighbor range prefix.
type: str
neighbor_range6:
description:
- BGP IPv6 neighbor range table.
type: list
suboptions:
id:
description:
- IPv6 neighbor range ID.
required: true
type: int
max_neighbor_num:
description:
- Maximum number of neighbors.
type: int
neighbor_group:
description:
- Neighbor group name. Source router.bgp.neighbor-group.name.
type: str
prefix6:
description:
- IPv6 prefix.
type: str
network:
description:
- BGP network table.
type: list
suboptions:
backdoor:
description:
- Enable/disable route as backdoor.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix:
description:
- Network prefix.
type: str
route_map:
description:
- Route map to modify generated route. Source router.route-map.name.
type: str
network_import_check:
description:
- Enable/disable ensuring that a BGP network route exists in the IGP.
type: str
choices:
- enable
- disable
network6:
description:
- BGP IPv6 network table.
type: list
suboptions:
backdoor:
description:
- Enable/disable route as backdoor.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix6:
description:
- Network IPv6 prefix.
type: str
route_map:
description:
- Route map to modify generated route. Source router.route-map.name.
type: str
redistribute:
description:
- BGP IPv4 redistribute table.
type: list
suboptions:
name:
description:
- Distribute list entry name.
required: true
type: str
route_map:
description:
- Route map name. Source router.route-map.name.
type: str
status:
description:
- Status of this redistribution entry.
type: str
choices:
- enable
- disable
redistribute6:
description:
- BGP IPv6 redistribute table.
type: list
suboptions:
name:
description:
- Distribute list entry name.
required: true
type: str
route_map:
description:
- Route map name. Source router.route-map.name.
type: str
status:
description:
- Status of this redistribution entry.
type: str
choices:
- enable
- disable
router_id:
description:
- Router ID.
type: str
scan_time:
description:
- Background scanner interval (sec), 0 to disable it.
type: int
synchronization:
description:
- Enable/disable only advertising routes from iBGP when the routes are present in an IGP.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure BGP.
fortios_router_bgp:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
router_bgp:
admin_distance:
-
distance: "4"
id: "5"
neighbour_prefix: "<your_own_value>"
route_list: "<your_own_value> (source router.access-list.name)"
aggregate_address:
-
as_set: "enable"
id: "10"
prefix: "<your_own_value>"
summary_only: "enable"
aggregate_address6:
-
as_set: "enable"
id: "15"
prefix6: "<your_own_value>"
summary_only: "enable"
always_compare_med: "enable"
as: "19"
bestpath_as_path_ignore: "enable"
bestpath_cmp_confed_aspath: "enable"
bestpath_cmp_routerid: "enable"
bestpath_med_confed: "enable"
bestpath_med_missing_as_worst: "enable"
client_to_client_reflection: "enable"
cluster_id: "<your_own_value>"
confederation_identifier: "27"
confederation_peers:
-
peer: "<your_own_value>"
dampening: "enable"
dampening_max_suppress_time: "31"
dampening_reachability_half_life: "32"
dampening_reuse: "33"
dampening_route_map: "<your_own_value> (source router.route-map.name)"
dampening_suppress: "35"
dampening_unreachability_half_life: "36"
default_local_preference: "37"
deterministic_med: "enable"
distance_external: "39"
distance_internal: "40"
distance_local: "41"
ebgp_multipath: "enable"
enforce_first_as: "enable"
fast_external_failover: "enable"
graceful_end_on_timer: "enable"
graceful_restart: "enable"
graceful_restart_time: "47"
graceful_stalepath_time: "48"
graceful_update_delay: "49"
holdtime_timer: "50"
ibgp_multipath: "enable"
ignore_optional_capability: "enable"
keepalive_timer: "53"
log_neighbour_changes: "enable"
neighbor:
-
activate: "enable"
activate6: "enable"
advertisement_interval: "58"
allowas_in: "59"
allowas_in_enable: "enable"
allowas_in_enable6: "enable"
allowas_in6: "62"
as_override: "enable"
as_override6: "enable"
attribute_unchanged: "as-path"
attribute_unchanged6: "as-path"
bfd: "enable"
capability_default_originate: "enable"
capability_default_originate6: "enable"
capability_dynamic: "enable"
capability_graceful_restart: "enable"
capability_graceful_restart6: "enable"
capability_orf: "none"
capability_orf6: "none"
capability_route_refresh: "enable"
conditional_advertise:
-
advertise_routemap: "<your_own_value> (source router.route-map.name)"
condition_routemap: "<your_own_value> (source router.route-map.name)"
condition_type: "exist"
connect_timer: "80"
default_originate_routemap: "<your_own_value> (source router.route-map.name)"
default_originate_routemap6: "<your_own_value> (source router.route-map.name)"
description: "<your_own_value>"
distribute_list_in: "<your_own_value> (source router.access-list.name)"
distribute_list_in6: "<your_own_value> (source router.access-list6.name)"
distribute_list_out: "<your_own_value> (source router.access-list.name)"
distribute_list_out6: "<your_own_value> (source router.access-list6.name)"
dont_capability_negotiate: "enable"
ebgp_enforce_multihop: "enable"
ebgp_multihop_ttl: "90"
filter_list_in: "<your_own_value> (source router.aspath-list.name)"
filter_list_in6: "<your_own_value> (source router.aspath-list.name)"
filter_list_out: "<your_own_value> (source router.aspath-list.name)"
filter_list_out6: "<your_own_value> (source router.aspath-list.name)"
holdtime_timer: "95"
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
keep_alive_timer: "98"
link_down_failover: "enable"
local_as: "100"
local_as_no_prepend: "enable"
local_as_replace_as: "enable"
maximum_prefix: "103"
maximum_prefix_threshold: "104"
maximum_prefix_threshold6: "105"
maximum_prefix_warning_only: "enable"
maximum_prefix_warning_only6: "enable"
maximum_prefix6: "108"
next_hop_self: "enable"
next_hop_self6: "enable"
override_capability: "enable"
passive: "enable"
password: "<your_own_value>"
prefix_list_in: "<your_own_value> (source router.prefix-list.name)"
prefix_list_in6: "<your_own_value> (source router.prefix-list6.name)"
prefix_list_out: "<your_own_value> (source router.prefix-list.name)"
prefix_list_out6: "<your_own_value> (source router.prefix-list6.name)"
remote_as: "118"
remove_private_as: "enable"
remove_private_as6: "enable"
restart_time: "121"
retain_stale_time: "122"
route_map_in: "<your_own_value> (source router.route-map.name)"
route_map_in6: "<your_own_value> (source router.route-map.name)"
route_map_out: "<your_own_value> (source router.route-map.name)"
route_map_out6: "<your_own_value> (source router.route-map.name)"
route_reflector_client: "enable"
route_reflector_client6: "enable"
route_server_client: "enable"
route_server_client6: "enable"
send_community: "standard"
send_community6: "standard"
shutdown: "enable"
soft_reconfiguration: "enable"
soft_reconfiguration6: "enable"
stale_route: "enable"
strict_capability_match: "enable"
unsuppress_map: "<your_own_value> (source router.route-map.name)"
unsuppress_map6: "<your_own_value> (source router.route-map.name)"
update_source: "<your_own_value> (source system.interface.name)"
weight: "141"
neighbor_group:
-
activate: "enable"
activate6: "enable"
advertisement_interval: "145"
allowas_in: "146"
allowas_in_enable: "enable"
allowas_in_enable6: "enable"
allowas_in6: "149"
as_override: "enable"
as_override6: "enable"
attribute_unchanged: "as-path"
attribute_unchanged6: "as-path"
bfd: "enable"
capability_default_originate: "enable"
capability_default_originate6: "enable"
capability_dynamic: "enable"
capability_graceful_restart: "enable"
capability_graceful_restart6: "enable"
capability_orf: "none"
capability_orf6: "none"
capability_route_refresh: "enable"
connect_timer: "163"
default_originate_routemap: "<your_own_value> (source router.route-map.name)"
default_originate_routemap6: "<your_own_value> (source router.route-map.name)"
description: "<your_own_value>"
distribute_list_in: "<your_own_value> (source router.access-list.name)"
distribute_list_in6: "<your_own_value> (source router.access-list6.name)"
distribute_list_out: "<your_own_value> (source router.access-list.name)"
distribute_list_out6: "<your_own_value> (source router.access-list6.name)"
dont_capability_negotiate: "enable"
ebgp_enforce_multihop: "enable"
ebgp_multihop_ttl: "173"
filter_list_in: "<your_own_value> (source router.aspath-list.name)"
filter_list_in6: "<your_own_value> (source router.aspath-list.name)"
filter_list_out: "<your_own_value> (source router.aspath-list.name)"
filter_list_out6: "<your_own_value> (source router.aspath-list.name)"
holdtime_timer: "178"
interface: "<your_own_value> (source system.interface.name)"
keep_alive_timer: "180"
link_down_failover: "enable"
local_as: "182"
local_as_no_prepend: "enable"
local_as_replace_as: "enable"
maximum_prefix: "185"
maximum_prefix_threshold: "186"
maximum_prefix_threshold6: "187"
maximum_prefix_warning_only: "enable"
maximum_prefix_warning_only6: "enable"
maximum_prefix6: "190"
name: "default_name_191"
next_hop_self: "enable"
next_hop_self6: "enable"
override_capability: "enable"
passive: "enable"
prefix_list_in: "<your_own_value> (source router.prefix-list.name)"
prefix_list_in6: "<your_own_value> (source router.prefix-list6.name)"
prefix_list_out: "<your_own_value> (source router.prefix-list.name)"
prefix_list_out6: "<your_own_value> (source router.prefix-list6.name)"
remote_as: "200"
remove_private_as: "enable"
remove_private_as6: "enable"
restart_time: "203"
retain_stale_time: "204"
route_map_in: "<your_own_value> (source router.route-map.name)"
route_map_in6: "<your_own_value> (source router.route-map.name)"
route_map_out: "<your_own_value> (source router.route-map.name)"
route_map_out6: "<your_own_value> (source router.route-map.name)"
route_reflector_client: "enable"
route_reflector_client6: "enable"
route_server_client: "enable"
route_server_client6: "enable"
send_community: "standard"
send_community6: "standard"
shutdown: "enable"
soft_reconfiguration: "enable"
soft_reconfiguration6: "enable"
stale_route: "enable"
strict_capability_match: "enable"
unsuppress_map: "<your_own_value> (source router.route-map.name)"
unsuppress_map6: "<your_own_value> (source router.route-map.name)"
update_source: "<your_own_value> (source system.interface.name)"
weight: "223"
neighbor_range:
-
id: "225"
max_neighbor_num: "226"
neighbor_group: "<your_own_value> (source router.bgp.neighbor-group.name)"
prefix: "<your_own_value>"
neighbor_range6:
-
id: "230"
max_neighbor_num: "231"
neighbor_group: "<your_own_value> (source router.bgp.neighbor-group.name)"
prefix6: "<your_own_value>"
network:
-
backdoor: "enable"
id: "236"
prefix: "<your_own_value>"
route_map: "<your_own_value> (source router.route-map.name)"
network_import_check: "enable"
network6:
-
backdoor: "enable"
id: "242"
prefix6: "<your_own_value>"
route_map: "<your_own_value> (source router.route-map.name)"
redistribute:
-
name: "default_name_246"
route_map: "<your_own_value> (source router.route-map.name)"
status: "enable"
redistribute6:
-
name: "default_name_250"
route_map: "<your_own_value> (source router.route-map.name)"
status: "enable"
router_id: "<your_own_value>"
scan_time: "254"
synchronization: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
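# A minimal, hypothetical playbook fragment showing how the RETURN values above
# can be consumed; the register name and the debug task are illustrative only:
#
#   - name: Configure BGP.
#     fortios_router_bgp:
#       vdom: "root"
#       router_bgp:
#         as: 65001
#         router_id: "10.0.0.1"
#     register: bgp_result
#
#   - debug:
#       msg: "{{ bgp_result.meta.status }} (HTTP {{ bgp_result.meta.http_status }})"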
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
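# Hedged sketch of how login() is driven in legacy mode (this mirrors what
# main() does below); the host and credentials are placeholders:
#
#   fos = FortiOSAPI()
#   login({'host': '192.168.122.40', 'username': 'admin', 'password': '',
#          'https': False, 'ssl_verify': False}, fos)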
def filter_router_bgp_data(json):
option_list = ['admin_distance', 'aggregate_address', 'aggregate_address6',
'always_compare_med', 'as', 'bestpath_as_path_ignore',
'bestpath_cmp_confed_aspath', 'bestpath_cmp_routerid', 'bestpath_med_confed',
'bestpath_med_missing_as_worst', 'client_to_client_reflection', 'cluster_id',
'confederation_identifier', 'confederation_peers', 'dampening',
'dampening_max_suppress_time', 'dampening_reachability_half_life', 'dampening_reuse',
'dampening_route_map', 'dampening_suppress', 'dampening_unreachability_half_life',
'default_local_preference', 'deterministic_med', 'distance_external',
'distance_internal', 'distance_local', 'ebgp_multipath',
'enforce_first_as', 'fast_external_failover', 'graceful_end_on_timer',
'graceful_restart', 'graceful_restart_time', 'graceful_stalepath_time',
'graceful_update_delay', 'holdtime_timer', 'ibgp_multipath',
'ignore_optional_capability', 'keepalive_timer', 'log_neighbour_changes',
'neighbor', 'neighbor_group', 'neighbor_range',
'neighbor_range6', 'network', 'network_import_check',
'network6', 'redistribute', 'redistribute6',
'router_id', 'scan_time', 'synchronization']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
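# Filtering example (hypothetical input): keys outside option_list and keys
# whose value is None are both dropped.
#
#   filter_router_bgp_data({'as': 65001, 'router_id': None, 'bogus': 1})
#   -> {'as': 65001}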
def underscore_to_hyphen(data):
    if isinstance(data, list):
        # Write the converted element back into the list; a bare
        # `elem = underscore_to_hyphen(elem)` would only rebind the loop
        # variable and leave nested dictionaries unconverted.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
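# Conversion example: keys are hyphenated recursively, matching the names the
# FortiOS API expects.
#
#   underscore_to_hyphen({'keepalive_timer': 60, 'neighbor': [{'remote_as': 65002}]})
#   -> {'keepalive-timer': 60, 'neighbor': [{'remote-as': 65002}]}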
def router_bgp(data, fos):
vdom = data['vdom']
router_bgp_data = data['router_bgp']
filtered_data = underscore_to_hyphen(filter_router_bgp_data(router_bgp_data))
return fos.set('router',
'bgp',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
    # A DELETE that comes back 404 is treated as success: the object to remove
    # is already absent, so the operation is effectively idempotent.
    return status['status'] == "success" or \
        (status['http_method'] == "DELETE" and status['http_status'] == 404)
def fortios_router(data, fos):
if data['router_bgp']:
resp = router_bgp(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"router_bgp": {
"required": False, "type": "dict", "default": None,
"options": {
"admin_distance": {"required": False, "type": "list",
"options": {
"distance": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"neighbour_prefix": {"required": False, "type": "str"},
"route_list": {"required": False, "type": "str"}
}},
"aggregate_address": {"required": False, "type": "list",
"options": {
"as_set": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"},
"summary_only": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"aggregate_address6": {"required": False, "type": "list",
"options": {
"as_set": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"id": {"required": True, "type": "int"},
"prefix6": {"required": False, "type": "str"},
"summary_only": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"always_compare_med": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"as": {"required": False, "type": "int"},
"bestpath_as_path_ignore": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"bestpath_cmp_confed_aspath": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"bestpath_cmp_routerid": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"bestpath_med_confed": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"bestpath_med_missing_as_worst": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client_to_client_reflection": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"cluster_id": {"required": False, "type": "str"},
"confederation_identifier": {"required": False, "type": "int"},
"confederation_peers": {"required": False, "type": "list",
"options": {
"peer": {"required": True, "type": "str"}
}},
"dampening": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dampening_max_suppress_time": {"required": False, "type": "int"},
"dampening_reachability_half_life": {"required": False, "type": "int"},
"dampening_reuse": {"required": False, "type": "int"},
"dampening_route_map": {"required": False, "type": "str"},
"dampening_suppress": {"required": False, "type": "int"},
"dampening_unreachability_half_life": {"required": False, "type": "int"},
"default_local_preference": {"required": False, "type": "int"},
"deterministic_med": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"distance_external": {"required": False, "type": "int"},
"distance_internal": {"required": False, "type": "int"},
"distance_local": {"required": False, "type": "int"},
"ebgp_multipath": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"enforce_first_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fast_external_failover": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"graceful_end_on_timer": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"graceful_restart": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"graceful_restart_time": {"required": False, "type": "int"},
"graceful_stalepath_time": {"required": False, "type": "int"},
"graceful_update_delay": {"required": False, "type": "int"},
"holdtime_timer": {"required": False, "type": "int"},
"ibgp_multipath": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ignore_optional_capability": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"keepalive_timer": {"required": False, "type": "int"},
"log_neighbour_changes": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"neighbor": {"required": False, "type": "list",
"options": {
"activate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"activate6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"advertisement_interval": {"required": False, "type": "int"},
"allowas_in": {"required": False, "type": "int"},
"allowas_in_enable": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"allowas_in_enable6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"allowas_in6": {"required": False, "type": "int"},
"as_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"as_override6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"attribute_unchanged": {"required": False, "type": "str",
"choices": ["as-path", "med", "next-hop"]},
"attribute_unchanged6": {"required": False, "type": "str",
"choices": ["as-path", "med", "next-hop"]},
"bfd": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_default_originate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_default_originate6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_dynamic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_graceful_restart": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_graceful_restart6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_orf": {"required": False, "type": "str",
"choices": ["none", "receive", "send",
"both"]},
"capability_orf6": {"required": False, "type": "str",
"choices": ["none", "receive", "send",
"both"]},
"capability_route_refresh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"conditional_advertise": {"required": False, "type": "list",
"options": {
"advertise_routemap": {"required": False, "type": "str"},
"condition_routemap": {"required": False, "type": "str"},
"condition_type": {"required": False, "type": "str",
"choices": ["exist", "non-exist"]}
}},
"connect_timer": {"required": False, "type": "int"},
"default_originate_routemap": {"required": False, "type": "str"},
"default_originate_routemap6": {"required": False, "type": "str"},
"description": {"required": False, "type": "str"},
"distribute_list_in": {"required": False, "type": "str"},
"distribute_list_in6": {"required": False, "type": "str"},
"distribute_list_out": {"required": False, "type": "str"},
"distribute_list_out6": {"required": False, "type": "str"},
"dont_capability_negotiate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ebgp_enforce_multihop": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ebgp_multihop_ttl": {"required": False, "type": "int"},
"filter_list_in": {"required": False, "type": "str"},
"filter_list_in6": {"required": False, "type": "str"},
"filter_list_out": {"required": False, "type": "str"},
"filter_list_out6": {"required": False, "type": "str"},
"holdtime_timer": {"required": False, "type": "int"},
"interface": {"required": False, "type": "str"},
"ip": {"required": True, "type": "str"},
"keep_alive_timer": {"required": False, "type": "int"},
"link_down_failover": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_as": {"required": False, "type": "int"},
"local_as_no_prepend": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_as_replace_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix": {"required": False, "type": "int"},
"maximum_prefix_threshold": {"required": False, "type": "int"},
"maximum_prefix_threshold6": {"required": False, "type": "int"},
"maximum_prefix_warning_only": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix_warning_only6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix6": {"required": False, "type": "int"},
"next_hop_self": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"next_hop_self6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"override_capability": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"passive": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"password": {"required": False, "type": "str"},
"prefix_list_in": {"required": False, "type": "str"},
"prefix_list_in6": {"required": False, "type": "str"},
"prefix_list_out": {"required": False, "type": "str"},
"prefix_list_out6": {"required": False, "type": "str"},
"remote_as": {"required": False, "type": "int"},
"remove_private_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"remove_private_as6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"restart_time": {"required": False, "type": "int"},
"retain_stale_time": {"required": False, "type": "int"},
"route_map_in": {"required": False, "type": "str"},
"route_map_in6": {"required": False, "type": "str"},
"route_map_out": {"required": False, "type": "str"},
"route_map_out6": {"required": False, "type": "str"},
"route_reflector_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_reflector_client6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_server_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_server_client6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"send_community": {"required": False, "type": "str",
"choices": ["standard", "extended", "both",
"disable"]},
"send_community6": {"required": False, "type": "str",
"choices": ["standard", "extended", "both",
"disable"]},
"shutdown": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"soft_reconfiguration": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"soft_reconfiguration6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"stale_route": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"strict_capability_match": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"unsuppress_map": {"required": False, "type": "str"},
"unsuppress_map6": {"required": False, "type": "str"},
"update_source": {"required": False, "type": "str"},
"weight": {"required": False, "type": "int"}
}},
"neighbor_group": {"required": False, "type": "list",
"options": {
"activate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"activate6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"advertisement_interval": {"required": False, "type": "int"},
"allowas_in": {"required": False, "type": "int"},
"allowas_in_enable": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"allowas_in_enable6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"allowas_in6": {"required": False, "type": "int"},
"as_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"as_override6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"attribute_unchanged": {"required": False, "type": "str",
"choices": ["as-path", "med", "next-hop"]},
"attribute_unchanged6": {"required": False, "type": "str",
"choices": ["as-path", "med", "next-hop"]},
"bfd": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_default_originate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_default_originate6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_dynamic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_graceful_restart": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_graceful_restart6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"capability_orf": {"required": False, "type": "str",
"choices": ["none", "receive", "send",
"both"]},
"capability_orf6": {"required": False, "type": "str",
"choices": ["none", "receive", "send",
"both"]},
"capability_route_refresh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"connect_timer": {"required": False, "type": "int"},
"default_originate_routemap": {"required": False, "type": "str"},
"default_originate_routemap6": {"required": False, "type": "str"},
"description": {"required": False, "type": "str"},
"distribute_list_in": {"required": False, "type": "str"},
"distribute_list_in6": {"required": False, "type": "str"},
"distribute_list_out": {"required": False, "type": "str"},
"distribute_list_out6": {"required": False, "type": "str"},
"dont_capability_negotiate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ebgp_enforce_multihop": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ebgp_multihop_ttl": {"required": False, "type": "int"},
"filter_list_in": {"required": False, "type": "str"},
"filter_list_in6": {"required": False, "type": "str"},
"filter_list_out": {"required": False, "type": "str"},
"filter_list_out6": {"required": False, "type": "str"},
"holdtime_timer": {"required": False, "type": "int"},
"interface": {"required": False, "type": "str"},
"keep_alive_timer": {"required": False, "type": "int"},
"link_down_failover": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_as": {"required": False, "type": "int"},
"local_as_no_prepend": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_as_replace_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix": {"required": False, "type": "int"},
"maximum_prefix_threshold": {"required": False, "type": "int"},
"maximum_prefix_threshold6": {"required": False, "type": "int"},
"maximum_prefix_warning_only": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix_warning_only6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"maximum_prefix6": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"next_hop_self": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"next_hop_self6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"override_capability": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"passive": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"prefix_list_in": {"required": False, "type": "str"},
"prefix_list_in6": {"required": False, "type": "str"},
"prefix_list_out": {"required": False, "type": "str"},
"prefix_list_out6": {"required": False, "type": "str"},
"remote_as": {"required": False, "type": "int"},
"remove_private_as": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"remove_private_as6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"restart_time": {"required": False, "type": "int"},
"retain_stale_time": {"required": False, "type": "int"},
"route_map_in": {"required": False, "type": "str"},
"route_map_in6": {"required": False, "type": "str"},
"route_map_out": {"required": False, "type": "str"},
"route_map_out6": {"required": False, "type": "str"},
"route_reflector_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_reflector_client6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_server_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"route_server_client6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"send_community": {"required": False, "type": "str",
"choices": ["standard", "extended", "both",
"disable"]},
"send_community6": {"required": False, "type": "str",
"choices": ["standard", "extended", "both",
"disable"]},
"shutdown": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"soft_reconfiguration": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"soft_reconfiguration6": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"stale_route": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"strict_capability_match": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"unsuppress_map": {"required": False, "type": "str"},
"unsuppress_map6": {"required": False, "type": "str"},
"update_source": {"required": False, "type": "str"},
"weight": {"required": False, "type": "int"}
}},
"neighbor_range": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"max_neighbor_num": {"required": False, "type": "int"},
"neighbor_group": {"required": False, "type": "str"},
"prefix": {"required": False, "type": "str"}
}},
"neighbor_range6": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"max_neighbor_num": {"required": False, "type": "int"},
"neighbor_group": {"required": False, "type": "str"},
"prefix6": {"required": False, "type": "str"}
}},
"network": {"required": False, "type": "list",
"options": {
"backdoor": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"},
"route_map": {"required": False, "type": "str"}
}},
"network_import_check": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"network6": {"required": False, "type": "list",
"options": {
"backdoor": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"id": {"required": True, "type": "int"},
"prefix6": {"required": False, "type": "str"},
"route_map": {"required": False, "type": "str"}
}},
"redistribute": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"},
"route_map": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"redistribute6": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"},
"route_map": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"router_id": {"required": False, "type": "str"},
"scan_time": {"required": False, "type": "int"},
"synchronization": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_router(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_router(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
app/request.py | Nasseh123/news-api @ 8ba44fdc8ba92982eb496055ca326de502165f87 | license: Unlicense
# urllib.request opens a connection to the API URL and sends the request;
# json parses the JSON response into a Python dictionary.
import urllib.request
import json

from .models import Newssource, Newsarticle

# API key and URL templates; populated from the Flask app config by config_request().
api_key = None
Newssource_url = None
Newsarticle_url = None
NewsarticleSearch_url = None
NEWSURL=None
NEWSHEADLINES=None
def config_request(app):
global api_key,Newssource_url,Newsarticle_url,NewsarticleSearch_url,NEWSURL,NEWSHEADLINES
api_key=app.config['NEWS_API_KEY']
Newssource_url=app.config['NEWS_API_BASE_URL']
Newsarticle_url=app.config['NEWS_ARTICLE_BASE_URL']
NewsarticleSearch_url=app.config['NEWS_ARTICLE_SEARCH_BASE_URL']
NEWSURL=app.config['NEWSURL']
NEWSHEADLINES=app.config['NEWS_HEADLINES']
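# The Flask app config is expected to provide entries shaped roughly like the
# following; the URLs are hypothetical newsapi.org-style templates:
#
#   NEWS_API_KEY = 'your-api-key'
#   NEWSURL = 'https://newsapi.org/v2/sources?category={}&apiKey={}'
#   NEWS_ARTICLE_SEARCH_BASE_URL = 'https://newsapi.org/v2/everything?q={}&apiKey={}'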
def get_newssource(category):
'''
Function that gets the json response to our url request
'''
get_news_url=NEWSURL.format(category,api_key)
with urllib.request.urlopen(get_news_url) as url:
get_newssource_data=url.read()
get_newssource_response=json.loads(get_newssource_data)
Newssource_results = None
if get_newssource_response['sources']:
Newssource_results_list=get_newssource_response['sources']
Newssource_results=process_results(Newssource_results_list)
return Newssource_results
def process_results(newssource_list):
    '''
    Function that processes the news source results and transforms them into a list of objects

    Args:
        newssource_list: A list of dictionaries that contain news sources

    Returns:
        Newssource_results: A list of Newssource objects
    '''
Newssource_results = []
for newssource_item in newssource_list:
id=newssource_item.get('id')
name=newssource_item.get('name')
description=newssource_item.get('description')
url=newssource_item.get('url')
category=newssource_item.get('category')
language=newssource_item.get('language')
country=newssource_item.get('country')
if name:
Newssource_object=Newssource(id,name,description,url,category,language,country)
Newssource_results.append(Newssource_object)
return Newssource_results
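# Example (hypothetical payload): each source dictionary becomes a Newssource
# object; entries without a 'name' are skipped and missing fields default to None.
#
#   process_results([{'id': 'bbc-news', 'name': 'BBC News', 'url': 'https://bbc.com'}])
#   -> [Newssource('bbc-news', 'BBC News', None, 'https://bbc.com', None, None, None)]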
def get_news_article(source):
# source=news_article.get('url')
get_Newsarticle_url=Newsarticle_url.format(source,api_key)
with urllib.request.urlopen(get_Newsarticle_url) as url:
Newsarticle_details_data=url.read()
Newsarticle_details_response=json.loads(Newsarticle_details_data)
newsarticle_object=None
if Newsarticle_details_response['articles']:
Newssource_results_list=Newsarticle_details_response['articles']
newsarticle_object=process_results_article(Newssource_results_list)
# print(Newsarticle_details_response)
return newsarticle_object
def process_results_article(newsarticle_list):
    '''
    Function that processes the article results and transforms them into a list of objects

    Args:
        newsarticle_list: A list of dictionaries that contain news articles

    Returns:
        Newsarticle_results: A list of Newsarticle objects
    '''
Newsarticle_results = []
for newsarticle_item in newsarticle_list:
# print (Newsarticle_results)
id=newsarticle_item.get('id')
name=newsarticle_item.get('name')
author=newsarticle_item.get('author')
title=newsarticle_item.get('title')
urlToImage=newsarticle_item.get('urlToImage')
description=newsarticle_item.get('description')
url=newsarticle_item.get('url')
publishedAt=newsarticle_item.get('publishedAt')
content=newsarticle_item.get('content')
# print(newsarticle_item)
if url:
newsarticle_object=Newsarticle(id,name,author,title,urlToImage,description,url,publishedAt,content)
Newsarticle_results.append(newsarticle_object)
return Newsarticle_results
def search_newsarticle(articlesTitle):
News_Article_search_URL=NewsarticleSearch_url.format(articlesTitle,api_key)
with urllib.request.urlopen(News_Article_search_URL) as url:
search_Article_data=url.read()
search_Article_response=json.loads(search_Article_data)
search_Article_results=None
if search_Article_response['articles']:
search_Article_list=search_Article_response['articles']
search_Article_results=process_results_article(search_Article_list)
return search_Article_results
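# Usage sketch: returns a list of Newsarticle objects, or None when the
# response carries no matching articles.
#
#   results = search_newsarticle('python')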
def get_news(category):
'''
Function that gets the json response to our url request
'''
get_newssource_url=NEWSURL.format(category,api_key)
with urllib.request.urlopen(get_newssource_url) as url:
get_newssource_data=url.read()
get_newssource_response=json.loads(get_newssource_data)
Newssource_results = None
if get_newssource_response['sources']:
Newssource_results_list=get_newssource_response['sources']
Newssource_results=process_results(Newssource_results_list)
return Newssource_results
# *******************************************
def get_new_headlines():
# source=news_article.get('url')
get_Newsheadlines=NEWSHEADLINES.format(api_key)
with urllib.request.urlopen(get_Newsheadlines) as url:
Newsarticle_details_data=url.read()
Newsarticle_details_response=json.loads(Newsarticle_details_data)
newsarticle_object=None
if Newsarticle_details_response['articles']:
Newssource_results_list=Newsarticle_details_response['articles']
newsarticle_object=process_results_article(Newssource_results_list)
# print(Newsarticle_details_response)
return newsarticle_object
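# Note: the definition below shadows the earlier process_results_article; the
# only difference is that it keeps articles that have a urlToImage rather than
# articles that have a url.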
def process_results_article(newsarticle_list):
    '''
    Function that processes the article results and transforms them into a list of objects

    Args:
        newsarticle_list: A list of dictionaries that contain news articles

    Returns:
        Newsarticle_results: A list of Newsarticle objects
    '''
Newsarticle_results = []
for newsarticle_item in newsarticle_list:
# print (Newsarticle_results)
id=newsarticle_item.get('id')
name=newsarticle_item.get('name')
author=newsarticle_item.get('author')
title=newsarticle_item.get('title')
urlToImage=newsarticle_item.get('urlToImage')
description=newsarticle_item.get('description')
url=newsarticle_item.get('url')
publishedAt=newsarticle_item.get('publishedAt')
content=newsarticle_item.get('content')
# print(newsarticle_item)
if urlToImage:
newsarticle_object=Newsarticle(id,name,author,title,urlToImage,description,url,publishedAt,content)
Newsarticle_results.append(newsarticle_object)
return Newsarticle_results
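# Usage sketch (illustrative): assumes NEWSURL, NEWSHEADLINES, NewsarticleSearch_url
# and api_key are configured earlier in this module, as the functions above expect.
if __name__ == '__main__':
    # Each helper returns None when the API sends no results, hence the `or []` guards.
    for source in get_news('technology') or []:
        print(source)
    for article in search_newsarticle('python') or []:
        print(article)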
| 36.26087
| 169
| 0.727098
| 963
| 8,340
| 6.007269
| 0.09242
| 0.038721
| 0.056007
| 0.016595
| 0.86949
| 0.861538
| 0.85255
| 0.841141
| 0.841141
| 0.841141
| 0
| 0
| 0.192566
| 8,340
| 229
| 170
| 36.419214
| 0.859073
| 0.174221
| 0
| 0.779412
| 0
| 0
| 0.058762
| 0.00729
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080882
| false
| 0
| 0.014706
| 0
| 0.169118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9c6ffa1e21bb3d8d180997f0832ee01b88046419
| 10,791
|
py
|
Python
|
day09/day09.py
|
dancergraham/advent_of_code_2021
|
931e56d90fdaccd9ad14c3eb0e4826a87eb9ddc4
|
[
"MIT"
] | 5
|
2021-12-02T12:12:28.000Z
|
2022-01-08T23:19:53.000Z
|
day09/day09.py
|
dancergraham/advent_of_code_2021
|
931e56d90fdaccd9ad14c3eb0e4826a87eb9ddc4
|
[
"MIT"
] | null | null | null |
day09/day09.py
|
dancergraham/advent_of_code_2021
|
931e56d90fdaccd9ad14c3eb0e4826a87eb9ddc4
|
[
"MIT"
] | null | null | null |
# File for execution in IronPython 2.7 inside Rhino version 7
import rhinoscriptsyntax as rs
rs.EnableRedraw(False)
srf = rs.AddSrfPt([[0, 0],
[1, 0],
[1, 1],
[0, 1]])
s = """7654598954321095410125798754578999894323456789349878901298743234767897899987654234567895493239656798\n6543487895992987324234599543467998789934567893298767892987643123457976789998762123456789989198768987\n7632356789889876535345987654788987678897678954997656789998784434578965699999878244678997978999999876\n7543487896673987676767898995689876556789789769876545698999896549989434988799954345989896767899886765\n9854567965432398989898959987789987347899899879765434567893987698999549877669875656798765656998765454\n8767778954321239799979545299897898258976989989898545679932198987987698765457989767897654343987654323\n9878989765932345689965432134956789349965678999987656998991019876798987654346799898998743212398787634\n9989999979896456789876541012347898767894567899898769897989198765689996543235678969987632101239896545\n9999987898789767899985432123456789898993678998789898796778987654677897652124689659876543212548987656\n8999876765678998989996543234567899969789799987678997655569898943456894321034569543987854433467898767\n7898765454578949678987854345688989954656989876549876543456789852345789632123458932398767654878949878\n6987654323469434589798975458799569893239876957234987632347898761245689543234567893499898965989432999\n5698765545678995695699876569893456789198765432145698743456987650156897656745689994989989899998949976\n4569877658799989954987987678974567891019898547656899856769898741456789789856789989878965798987998765\n3567998969989877896996598789765678943423987668767956967898765432367899893979899876767954567896899994\n2357679879878766789895459899899889654654598979979349878939898543498987991989999765656893678965798789\n1234589998769655456799349978912998798777679899899556989321987654567896889999997654346789799654987678\n0395679319943242345678998767899879899988789798788967896532399765678965678909876543234599898943498989\n1989989429832101236789987656998765976799998674667998987649499887889434599214987653125989987892349994\n9878998998763212367895498547899894345999876543456899298798989998997323989923987432014578976989457893\n8867897987654723456789597634989989459898765632566789349897678999765439879894986542123456895678998912\n7658956798765674567999999745678978998769854320175898959986569899876598767789997656254589934599889201\n6545697899986897678989899869899569987656965431234567898775458789987699654678998867767678910987678912\n7656789998997998989976789878923498899549876532678979987654349689998986543567999988878989329976567893\n8789891297698979398765398989549987678932998743569989876743234567989995432178898799989799498765458989\n9898989975459763209879987899998976567891987656798795975432125698979876543236789642395678987654345678\n4987678984348954345998976789876565457890999767897654986543026789767987674345894321024567898543234567\n3986567895467895956987895678975434356999879888998543297654534895455698765667895933235878987432103458\n9895456789578999899876724567954321234598765999789432198785675954324789887779999894345699876553212667\n6754345678989998798765213479543210145987654545678944569896786943212398998889998795456789987664333569\n5421234789999987679854374678954321259876543234599655679987897894103987769999997689567899899965654698\n4310123678999976598765465678998542367965432123698769798698998965212396556789876578978998768898765987\n5523234599998765439876566789987656459876521012789978987549899954334965434898765467899999656789879876\n7654345678989876321989699892198767867998643223898989998634789876449896525987654356789986547678989985\n87959597898789875301987899210198789899876543545678978997456789878987654345988762
45699875434567898954\n9989898999769876545239896542199999999798765469798976799898789599939878545679765123987654323456987653\n9876787898954989654345987543578934987659986878939995989939995432323989959797543245798545212347896742\n9985676567969898765467897664989123498547897989023989678924789541012399898987674356987632101498965431\n9654324456998789898567999879991012999656798998939876569015678932143498787898789569876543412359894320\n8743212345897698987688987989989139898767899567899987894323799993254987656789899878989654323456789431\n9854599456789587898799876797878999789979923456999898965434989989345986545699932999998765434567896542\n7965678567893436999898765456567987679989212377898769876559878978959877321789921989899878755678976543\n6798789678932125899987654343459876578997602457997653987698967869898766210867892979789989867889987654\n5679898789543234789999743212345965469876543458943212398987856956789854321456999767678999878991098967\n4298969897654545678987654343459876568987754567894323459996543245698766432378987654569899999892989878\n5987654998787679799998765654569987678999865678965654698875442134459887543569876543458789876789876989\n6798543459898789898999876767898998789434976789998785987654321012345998764598765432345678965498765498\n7987632367999891967789989898987859899325987899989899898765532133498999878909854321234589874349986567\n9876521459895910145694393999876745978976798999879998769876743654567898999919873210145698943235987678\n8765410498754321234893212598765434567897949998768999954987858795678967987896954321234567892126998789\n7654321239875436545789343459876745678998934989656589892098969989789459896545967434345678921019899893\n8987432389876587656789754567987896789239895976545476789129998978994398765429876545559789992398765912\n9876543478987698769999967878998989897456789895432345678934987656789219873212987656767999989987654101\n9987654569998899898899898989899878976567898789321234578949896545898998754101498767898999878986543212\n9998987678969910987778789998768767898978989678910123789798765435667899968912369878959989769897656323\n8949798799654329896565678987657656899989878569434236897698754323458789879893458989345678956798787434\n7939659899965698785454567898542546788998765458946345896597653212345678998799567893234569543239996565\n6898946999896987676323458987651234567899876767895456789987432101234589989678978942123498932123987876\n5687899998789996543212767898540145678999987898976587998996543212347679878567899543014567893234598987\n4546678987678987652103456789432234589989998989987698977987654323456798765456987654165678954545679998\n3234567986568997653212367896543445678978949876798789766498765634567899874345896543236799765676789999\n4345679875476798765435458987654598789765432765689897654329886797678932965212789754347899898787899887\n5678989994365679896547569998895679899876741534589986543212999898999549984345678965458999979898945676\n8789299976234569999658678939976789998998810125678987654109212999745998765756789876567898763999432445\n9891019876345678998769789923987892987654323236989998783298999987659899876867893987689987651299921234\n7942323965476989019878899895998921099798434587999999894987888998798789989978932398789299740987892545\n6543499876787893298989998789899933129897659678989899999876567899897698799989321239899398921976789656\n9654987987898994987899987676789896534999798989878789998765437899986545678996560123978987899875678967\n8769876598939989876789876545696789645698987898765678987654326799765434569897671234569876797654589998\n98797654391238789856987654345456789569878767896545678999767159876
54323456789982367898765679863689989\n9998974321012367894349854321234567899876765678943456789897924598765434878996543456789876889954599878\n9876795432123456999298765410123456789965634567892567898789897679876565989997656567896989999875789767\n1965689943234567898949876523534567899854323456789678987698789799989696799879897679945691012986797545\n2954567899999678997834987854678978998765414567898789876544678989998789898965969895434889129799896535\n9876698978788989986325698998789989349876565878999899965433567878999892967893459954323978998656965421\n4988789767687899965434789589894391234987676789899998765322346567898901256932398767434569997345799210\n3499899856566789876895893478943210123498989896789999893210123457897432347899499876567878986234987921\n2345978943455678987976912367954934534569991935889898954321434568996543656998989997678989765349876899\n1234567891234589798987893459899895675698910123679767898543565679987654578987878998789199876598765798\n0123459932345697679999954598789789896987851234569656987654678789998765689876667899893234987679894626\n1245678943456789567899967987657678989986543447678945699865789899999898789765456789989349998789983515\n2356899767567893456799899898434569878987894758789234569976898999876949897654345679879498999899872103\n3987999878678912345987656789923459965698996769894345678989987689765634989993234798968987899998763212\n4598987989789923459876544567894598754569987878965467889899876599874329878789345987657676789987654323\n6679876799899854569865433456965679987678998989976567998789965499965498767678959876542545989998765434\n7898965678998765698765212368896789998789659597897678987678996989876987654567899984321234578999896545\n8987894389789876789854323456789998999898743456789789996567989878987898543468999893210345689999987678\n9456954245678989899965676567899897989919654568999899987679879967898987658979098764321456799989598989\n7677892134567892979876789878998775878909798679346989198798767856789998767889199765432347899875459899\n8789921012678943459987892989899654767698998791235679019987658347698999898991989899843678998764345679\n9899432123489994598998901997789543457567899892346789129876545236587899939690978998764589569853236789\n9998743484569889997899219875695432143456799943456789298765432123456789323489765679965694456932125898\n9899654567698779876789923994589521012345898954967898999877651012347897435678954567987892399893234567\n8798765679987654987999899965679432123476997899878967898998432123478976566789543476998943987789345678\n7659898789878743499998798896789543454569896989989854867899843254567898977995432345899659876568956799\n6546979898765432578998667799897654767698785678997643656798754365678959998976541276798969876489969896\n5435367999878543458998546678998767899987654567896532345679765476789345999987652567987897687349899945\n4321256789987676567987634567899878998998323456989421236789876587894233898998743459876789543246789434\n3210345678998987678998745688954989987569212345678965456789987698943102767899854598765897652125679323\n5421456789109999789987656789543499876432101234789876567891099789656813456932965679854999873234568912"""
for r, line in enumerate(s.splitlines()):
    for c, val in enumerate(line):
        if val != "9":
            rs.CopyObject(srf, [c, r, 0])
rs.EnableRedraw(True)
# manually merge individual surfaces into polysurfaces before continuing
polysurfs = rs.GetObjects(filter=16)
polysurfs.sort(key=rs.Area, reverse=True)
rs.SelectObjects(polysurfs[:3])
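# A minimal pure-Python sketch of the same "three largest basins" idea
# (Advent of Code 2021, day 9 part 2), assuming the heightmap string `s`
# above: flood-fill every non-"9" cell instead of merging surfaces by hand.
def three_largest_basins(heightmap):
    grid = heightmap.splitlines()
    rows, cols = len(grid), len(grid[0])
    seen = set()
    sizes = []
    for r0 in range(rows):
        for c0 in range(cols):
            if grid[r0][c0] == "9" or (r0, c0) in seen:
                continue
            stack, size = [(r0, c0)], 0  # iterative flood fill of one basin
            seen.add((r0, c0))
            while stack:
                r, c = stack.pop()
                size += 1
                for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                    if (0 <= nr < rows and 0 <= nc < cols
                            and grid[nr][nc] != "9" and (nr, nc) not in seen):
                        seen.add((nr, nc))
                        stack.append((nr, nc))
            sizes.append(size)
    return sorted(sizes, reverse=True)[:3]
# print(three_largest_basins(s))  # areas of the three surfaces selected above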
| 513.857143
| 10,208
| 0.969697
| 180
| 10,791
| 58.133333
| 0.833333
| 0.000573
| 0.000573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.941
| 0.013622
| 10,791
| 20
| 10,209
| 539.55
| 0.042089
| 0.012047
| 0
| 0
| 0
| 0.066667
| 0.956934
| 0.95684
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
92ff83bf64309733d47184550ec5cbe5024c7847
| 130
|
py
|
Python
|
emission/net/usercache/formatters/android/mode_confirm.py
|
Andrew-Tan/e-mission-server
|
91d59bee86e63d803e401f10f4b6a2502effedda
|
[
"BSD-3-Clause"
] | null | null | null |
emission/net/usercache/formatters/android/mode_confirm.py
|
Andrew-Tan/e-mission-server
|
91d59bee86e63d803e401f10f4b6a2502effedda
|
[
"BSD-3-Clause"
] | 1
|
2017-08-31T19:54:16.000Z
|
2017-08-31T19:54:16.000Z
|
emission/net/usercache/formatters/ios/purpose_confirm.py
|
Andrew-Tan/e-mission-server
|
91d59bee86e63d803e401f10f4b6a2502effedda
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import emission.net.usercache.formatters.generic.userlabel as fgl
def format(entry):
    return fgl.format(entry)
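# Usage sketch (illustrative): the usercache formatting pipeline is assumed to
# resolve this module by platform/key and then call its `format` on the raw
# entry wrapper, delegating to the shared generic userlabel formatter:
#
#     import emission.net.usercache.formatters.android.mode_confirm as mcf
#     formatted = mcf.format(entry)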
| 21.666667
| 65
| 0.8
| 18
| 130
| 5.777778
| 0.777778
| 0.211538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 130
| 5
| 66
| 26
| 0.904348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
13769a1470b344343e6028fda1a9aa7c060b8fc0
| 219
|
py
|
Python
|
Spotkanie 3/tr_01.py
|
abixadamj/lekcja-enter-przyklady
|
4f23ee32a139e955f992b727ad86c6effb87a6d6
|
[
"MIT"
] | null | null | null |
Spotkanie 3/tr_01.py
|
abixadamj/lekcja-enter-przyklady
|
4f23ee32a139e955f992b727ad86c6effb87a6d6
|
[
"MIT"
] | null | null | null |
Spotkanie 3/tr_01.py
|
abixadamj/lekcja-enter-przyklady
|
4f23ee32a139e955f992b727ad86c6effb87a6d6
|
[
"MIT"
] | null | null | null |
width = 5.3
height = 3.67
triangle_area = (width * height) / 2
print("Pole trójkąta wynosi {triangle_area} cm^2")
print(f"Pole trójkąta wynosi {triangle_area} cm^2")
print("Pole trójkąta wynosi", triangle_area, "cm^2")
| 31.285714
| 52
| 0.726027
| 36
| 219
| 4.305556
| 0.388889
| 0.309677
| 0.348387
| 0.503226
| 0.741935
| 0.741935
| 0.741935
| 0.741935
| 0.503226
| 0
| 0
| 0.04712
| 0.127854
| 219
| 6
| 53
| 36.5
| 0.764398
| 0
| 0
| 0
| 0
| 0
| 0.484018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
13a635a9855b35d4adbb2053b7666709abea95f8
| 15,900
|
py
|
Python
|
ssd1306py/myfont24.py
|
ch686/ssd1306py-micropython
|
90a99f97b7b9da63d92716633cab046b18092ffb
|
[
"MIT"
] | null | null | null |
ssd1306py/myfont24.py
|
ch686/ssd1306py-micropython
|
90a99f97b7b9da63d92716633cab046b18092ffb
|
[
"MIT"
] | null | null | null |
ssd1306py/myfont24.py
|
ch686/ssd1306py-micropython
|
90a99f97b7b9da63d92716633cab046b18092ffb
|
[
"MIT"
] | null | null | null |
font24 = {
0xe6b094:
[0x00,0x00,0x00,0x00,0x80,0x70,0x3C,0x2C,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0xA0,0x20,0x30,0x20,0x00,0x00,0x00,0x00,0x08,0x04,0x03,0x01,0x08,0x08,0x09,
0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0xFD,0x09,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x03,0x1F,0x30,0x20,0x60,0x7C,0x00,0x00],#"气"
0xe58e8b:
[0x00,0x00,0x00,0x00,0xF8,0xF8,0x08,0x08,0x08,0x08,0x08,0x08,0xE8,0xE8,0x08,0x08,
0x08,0x08,0x08,0x0C,0x0C,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x3F,0x00,0x08,
0x08,0x08,0x08,0x08,0xFF,0xFF,0x08,0x08,0x48,0x88,0x08,0x08,0x00,0x00,0x00,0x00,
0x00,0x40,0x30,0x0C,0x03,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x1F,0x1F,0x20,0x20,
0x20,0x21,0x27,0x22,0x10,0x10,0x00,0x00],#"压"
0xe6b8a9:
[0x00,0x00,0x00,0x04,0x08,0x38,0x80,0x40,0x00,0xF8,0x88,0x88,0x88,0x88,0x88,0x88,
0x88,0x88,0xF8,0x08,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x06,0x80,0x78,0x07,0x20,
0xC0,0x4F,0x48,0x48,0xC8,0x48,0x48,0xC8,0x48,0x48,0x4F,0xE0,0x40,0x00,0x00,0x00,
0x00,0x01,0x01,0x3F,0x3F,0x40,0x40,0x40,0x3F,0x40,0x40,0x40,0x3F,0x40,0x40,0x3F,
0x40,0x40,0x40,0x3F,0x40,0x20,0x20,0x00],#"温"
0xe5baa6:
[0x00,0x00,0x00,0x00,0xF0,0x10,0x10,0x10,0x10,0x30,0xD0,0x52,0x1C,0x18,0x10,0x10,
0xD0,0x50,0x10,0x10,0x98,0x10,0x00,0x00,0x00,0x00,0x00,0xC0,0xFF,0x02,0x02,0x42,
0x42,0x42,0xDF,0x52,0x52,0x52,0x52,0x52,0xDF,0xC2,0x42,0x01,0x01,0x01,0x00,0x00,
0x00,0x60,0x18,0x07,0x00,0x40,0x40,0x40,0x40,0x20,0x21,0x12,0x14,0x08,0x1C,0x16,
0x33,0x20,0x20,0x60,0x60,0x20,0x20,0x00],#"度"
0xE58589:
[0x00,0x00,0x00,0x00,0x00,0x10,0x60,0xC0,0x80,0x00,0x00,0xFC,0xFC,0x00,0x00,0x00,
0xC0,0x70,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x08,0x08,0x08,0x08,0x09,
0xF9,0x78,0x08,0x07,0x07,0xF8,0xFC,0x0A,0x09,0x08,0x08,0x08,0x04,0x04,0x00,0x00,
0x00,0x00,0x40,0x40,0x20,0x10,0x18,0x0E,0x03,0x00,0x00,0x00,0x00,0x0F,0x3F,0x20,
0x60,0x60,0x60,0x60,0x60,0x3F,0x20,0x00],#"光"
0xE785A7:
[0x00,0x00,0x00,0xF8,0x08,0x08,0x08,0x08,0xFC,0x08,0x00,0x08,0x88,0xE8,0x38,0x08,
0x88,0x88,0x88,0xFC,0x1C,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x42,0x42,0x42,0x42,
0xFF,0x00,0x04,0x02,0xFD,0xFC,0x84,0x84,0x84,0x85,0x85,0xFE,0x04,0x00,0x00,0x00,
0x00,0x00,0x20,0x39,0x1E,0x00,0x00,0x00,0x04,0x38,0x00,0x00,0x00,0x06,0x3C,0x38,
0x00,0x00,0x02,0x0C,0x38,0x30,0x00,0x00],#"照"
0xe28483:
[0x00,0x00,0x00,0x70,0x88,0x88,0x70,0x00,0x80,0xC0,0x60,0x30,0x10,0x10,0x10,0x10,
0x10,0x20,0x20,0xC0,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7E,
0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x07,0x0C,0x08,0x18,0x10,0x10,0x10,
0x08,0x08,0x04,0x02,0x00,0x00,0x00,0x00],#"℃"
0x20:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00],#" "
0x2e:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1C,0x1C,0x1C,0x00,0x00,0x00,
0x00,0x00,0x00,0x00],#"."
0x3a:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x0E,0x0E,0x0E,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1C,0x1C,0x1C,
0x00,0x00,0x00,0x00],#":"
0x30:
[0x00,0x00,0x80,0xC0,0x60,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0x00,0xFE,0xFF,0x01,
0x00,0x00,0x00,0x00,0x01,0xFF,0xFE,0x00,0x00,0x01,0x07,0x0E,0x18,0x10,0x10,0x18,
0x0E,0x07,0x01,0x00],#"0"
0x31:
[0x00,0x00,0x80,0x80,0x80,0xC0,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x10,0x10,0x1F,0x1F,0x10,
0x10,0x10,0x00,0x00],#"1"
0x32:
[0x00,0x80,0x40,0x20,0x20,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0x00,0x03,0x03,0x00,
0x80,0x40,0x20,0x38,0x1F,0x07,0x00,0x00,0x00,0x1C,0x1A,0x19,0x18,0x18,0x18,0x18,
0x18,0x1F,0x00,0x00],#"2"
0x33:
[0x00,0x80,0xC0,0x20,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0x00,0x00,0x03,0x03,0x00,
0x10,0x10,0x18,0x2F,0xE7,0x80,0x00,0x00,0x00,0x07,0x0F,0x10,0x10,0x10,0x10,0x18,
0x0F,0x07,0x00,0x00],#"3"
0x34:
[0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xF0,0x00,0x00,0x00,0x00,0xC0,0xB0,0x88,
0x86,0x81,0x80,0xFF,0xFF,0x80,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x10,0x1F,
0x1F,0x10,0x10,0x00],#"4"
0x35:
[0x00,0x00,0xE0,0x60,0x60,0x60,0x60,0x60,0x60,0x60,0x00,0x00,0x00,0x00,0x3F,0x10,
0x08,0x08,0x08,0x18,0xF0,0xE0,0x00,0x00,0x00,0x07,0x0B,0x10,0x10,0x10,0x10,0x1C,
0x0F,0x03,0x00,0x00],#"5"
0x36:
[0x00,0x00,0x80,0xC0,0x40,0x20,0x20,0x20,0xE0,0xC0,0x00,0x00,0x00,0xFC,0xFF,0x21,
0x10,0x08,0x08,0x08,0x18,0xF0,0xE0,0x00,0x00,0x01,0x07,0x0C,0x18,0x10,0x10,0x10,
0x08,0x0F,0x03,0x00],#"6"
0x37:
[0x00,0x00,0xC0,0xE0,0x60,0x60,0x60,0x60,0x60,0xE0,0x60,0x00,0x00,0x00,0x03,0x00,
0x00,0x00,0xE0,0x18,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1F,0x1F,0x00,
0x00,0x00,0x00,0x00],#"7"
0x38:
[0x00,0x80,0xC0,0x60,0x20,0x20,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0x87,0xEF,0x2C,
0x18,0x18,0x30,0x30,0x68,0xCF,0x83,0x00,0x00,0x07,0x0F,0x08,0x10,0x10,0x10,0x10,
0x18,0x0F,0x07,0x00],#"8"
0x39:
[0x00,0x00,0xC0,0xC0,0x20,0x20,0x20,0x20,0xC0,0x80,0x00,0x00,0x00,0x1F,0x3F,0x60,
0x40,0x40,0x40,0x20,0x10,0xFF,0xFE,0x00,0x00,0x00,0x0C,0x1C,0x10,0x10,0x10,0x08,
0x0F,0x03,0x00,0x00],#"9"
0x41:
[0x00,0x00,0x00,0x00,0x80,0xE0,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x7C,
0x43,0x40,0x47,0x7F,0xF8,0x80,0x00,0x00,0x10,0x18,0x1F,0x10,0x00,0x00,0x00,0x00,
0x13,0x1F,0x1C,0x10],#"A"
0x42:
[0x20,0xE0,0xE0,0x20,0x20,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0x00,0xFF,0xFF,0x10,
0x10,0x10,0x10,0x18,0x2F,0xE7,0x80,0x00,0x10,0x1F,0x1F,0x10,0x10,0x10,0x10,0x10,
0x18,0x0F,0x07,0x00],#"B"
0x43:
[0x00,0x00,0x80,0xC0,0x40,0x20,0x20,0x20,0x20,0x60,0xE0,0x00,0x00,0xFC,0xFF,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x01,0x07,0x0E,0x18,0x10,0x10,0x10,
0x08,0x04,0x03,0x00],#"C"
0x44:
[0x20,0xE0,0xE0,0x20,0x20,0x20,0x20,0x40,0xC0,0x80,0x00,0x00,0x00,0xFF,0xFF,0x00,
0x00,0x00,0x00,0x00,0x01,0xFF,0xFE,0x00,0x10,0x1F,0x1F,0x10,0x10,0x10,0x18,0x08,
0x0E,0x07,0x01,0x00],#"D"
0x45:
[0x20,0xE0,0xE0,0x20,0x20,0x20,0x20,0x20,0x20,0x60,0x80,0x00,0x00,0xFF,0xFF,0x10,
0x10,0x10,0x10,0x7C,0x00,0x00,0x00,0x00,0x10,0x1F,0x1F,0x10,0x10,0x10,0x10,0x10,
0x10,0x18,0x06,0x00],#"E"
0x46:
[0x20,0xE0,0xE0,0x20,0x20,0x20,0x20,0x20,0x60,0x60,0x80,0x00,0x00,0xFF,0xFF,0x10,
0x10,0x10,0x10,0x7C,0x00,0x00,0x01,0x00,0x10,0x1F,0x1F,0x10,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00],#"F"
0x47:
[0x00,0x00,0x80,0xC0,0x60,0x20,0x20,0x20,0x40,0xE0,0x00,0x00,0x00,0xFC,0xFF,0x01,
0x00,0x00,0x40,0x40,0xC0,0xC1,0x40,0x40,0x00,0x01,0x07,0x0E,0x18,0x10,0x10,0x10,
0x0F,0x0F,0x00,0x00],#"G"
0x48:
[0x20,0xE0,0xE0,0x20,0x00,0x00,0x00,0x00,0x20,0xE0,0xE0,0x20,0x00,0xFF,0xFF,0x10,
0x10,0x10,0x10,0x10,0x10,0xFF,0xFF,0x00,0x10,0x1F,0x1F,0x10,0x00,0x00,0x00,0x00,
0x10,0x1F,0x1F,0x10],#"H"
0x49:
[0x00,0x00,0x20,0x20,0x20,0xE0,0xE0,0x20,0x20,0x20,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x10,0x10,0x1F,0x1F,0x10,
0x10,0x10,0x00,0x00],#"I"
0x4a:
[0x00,0x00,0x00,0x00,0x20,0x20,0x20,0xE0,0xE0,0x20,0x20,0x20,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x60,0xE0,0x80,0x80,0x80,0xC0,0x7F,
0x3F,0x00,0x00,0x00],#"J"
0x4b:
[0x20,0xE0,0xE0,0x20,0x00,0x00,0x20,0xA0,0x60,0x20,0x20,0x00,0x00,0xFF,0xFF,0x30,
0x18,0x7C,0xE3,0xC0,0x00,0x00,0x00,0x00,0x10,0x1F,0x1F,0x10,0x00,0x00,0x01,0x13,
0x1F,0x1C,0x18,0x10],#"K"
0x4c:
[0x20,0xE0,0xE0,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x1F,0x1F,0x10,0x10,0x10,0x10,0x10,
0x10,0x18,0x06,0x00],#"L"
0x4d:
[0x20,0xE0,0xE0,0xE0,0x00,0x00,0x00,0x00,0xE0,0xE0,0xE0,0x20,0x00,0xFF,0x01,0x3F,
0xFE,0xC0,0xE0,0x1E,0x01,0xFF,0xFF,0x00,0x10,0x1F,0x10,0x00,0x03,0x1F,0x03,0x00,
0x10,0x1F,0x1F,0x10],#"M"
0x4e:
[0x20,0xE0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x20,0xE0,0x20,0x00,0xFF,0x00,0x03,
0x07,0x1C,0x78,0xE0,0x80,0x00,0xFF,0x00,0x10,0x1F,0x10,0x00,0x00,0x00,0x00,0x00,
0x03,0x0F,0x1F,0x00],#"N"
0x4f:
[0x00,0x00,0x80,0xC0,0x60,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0x00,0xFE,0xFF,0x01,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFE,0x00,0x00,0x01,0x07,0x0E,0x18,0x10,0x10,0x18,
0x0C,0x07,0x01,0x00],#"O"
0x50:
[0x20,0xE0,0xE0,0x20,0x20,0x20,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0xFF,0xFF,0x20,
0x20,0x20,0x20,0x20,0x30,0x1F,0x0F,0x00,0x10,0x1F,0x1F,0x10,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00],#"P"
0x51:
[0x00,0x00,0x80,0xC0,0x60,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0x00,0xFE,0xFF,0x01,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFE,0x00,0x00,0x01,0x07,0x0E,0x11,0x11,0x13,0x3C,
0x7C,0x67,0x21,0x00],#"Q"
0x52:
[0x20,0xE0,0xE0,0x20,0x20,0x20,0x20,0x20,0x60,0xC0,0x80,0x00,0x00,0xFF,0xFF,0x10,
0x10,0x30,0xF0,0xD0,0x08,0x0F,0x07,0x00,0x10,0x1F,0x1F,0x10,0x00,0x00,0x00,0x03,
0x0F,0x1C,0x10,0x10],#"R"
0x53:
[0x00,0x80,0xC0,0x60,0x20,0x20,0x20,0x20,0x40,0x40,0xE0,0x00,0x00,0x07,0x0F,0x0C,
0x18,0x18,0x30,0x30,0x60,0xE0,0x81,0x00,0x00,0x1F,0x0C,0x08,0x10,0x10,0x10,0x10,
0x18,0x0F,0x07,0x00],#"S"
0x54:
[0x80,0x60,0x20,0x20,0x20,0xE0,0xE0,0x20,0x20,0x20,0x60,0x80,0x01,0x00,0x00,0x00,
0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x10,0x1F,0x1F,0x10,
0x00,0x00,0x00,0x00],#"T"
0x55:
[0x20,0xE0,0xE0,0x20,0x00,0x00,0x00,0x00,0x00,0x20,0xE0,0x20,0x00,0xFF,0xFF,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x07,0x0F,0x18,0x10,0x10,0x10,0x10,
0x10,0x08,0x07,0x00],#"U"
0x56:
[0x20,0x60,0xE0,0xE0,0x20,0x00,0x00,0x00,0x20,0xE0,0x60,0x20,0x00,0x00,0x07,0x7F,
0xF8,0x80,0x00,0x80,0x7C,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x1F,0x1C,0x07,
0x00,0x00,0x00,0x00],#"V"
0x57:
[0x20,0xE0,0xE0,0x20,0x00,0xE0,0xE0,0x20,0x00,0x20,0xE0,0x20,0x00,0x07,0xFF,0xF8,
0xE0,0x1F,0xFF,0xFC,0xE0,0x1F,0x00,0x00,0x00,0x00,0x03,0x1F,0x03,0x00,0x01,0x1F,
0x03,0x00,0x00,0x00],#"W"
0x58:
[0x00,0x20,0x60,0xE0,0xA0,0x00,0x00,0x20,0xE0,0x60,0x20,0x00,0x00,0x00,0x00,0x03,
0x8F,0x7C,0xF8,0xC6,0x01,0x00,0x00,0x00,0x00,0x10,0x18,0x1E,0x13,0x00,0x01,0x17,
0x1F,0x18,0x10,0x00],#"X"
0x59:
[0x20,0x60,0xE0,0xE0,0x20,0x00,0x00,0x00,0x20,0xE0,0x60,0x20,0x00,0x00,0x01,0x07,
0x3E,0xF8,0xE0,0x18,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x10,0x1F,0x1F,0x10,
0x10,0x00,0x00,0x00],#"Y"
0x5a:
[0x00,0x80,0x60,0x20,0x20,0x20,0x20,0xA0,0xE0,0xE0,0x20,0x00,0x00,0x00,0x00,0x00,
0xC0,0xF0,0x3E,0x0F,0x03,0x00,0x00,0x00,0x00,0x10,0x1C,0x1F,0x17,0x10,0x10,0x10,
0x10,0x18,0x06,0x00],#"Z"
0x61:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x98,0xD8,
0x44,0x64,0x24,0x24,0xFC,0xF8,0x00,0x00,0x00,0x0F,0x1F,0x18,0x10,0x10,0x10,0x08,
0x1F,0x1F,0x10,0x18],#"a"
0x62:
[0x00,0x20,0xE0,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,
0x18,0x08,0x04,0x04,0x0C,0xF8,0xF0,0x00,0x00,0x00,0x1F,0x0F,0x18,0x10,0x10,0x10,
0x18,0x0F,0x03,0x00],#"b"
0x63:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE0,0xF8,0x18,
0x04,0x04,0x04,0x3C,0x38,0x00,0x00,0x00,0x00,0x03,0x0F,0x0C,0x10,0x10,0x10,0x10,
0x08,0x06,0x00,0x00],#"c"
0x64:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x20,0xE0,0xF0,0x00,0x00,0x00,0xE0,0xF8,0x1C,
0x04,0x04,0x04,0x08,0xFF,0xFF,0x00,0x00,0x00,0x03,0x0F,0x18,0x10,0x10,0x10,0x08,
0x1F,0x0F,0x08,0x00],#"d"
0x65:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE0,0xF8,
0x48,0x44,0x44,0x44,0x4C,0x78,0x70,0x00,0x00,0x00,0x03,0x0F,0x0C,0x18,0x10,0x10,
0x10,0x08,0x04,0x00],#"e"
0x66:
[0x00,0x00,0x00,0x00,0x80,0xC0,0x60,0x20,0x20,0xE0,0xC0,0x00,0x00,0x04,0x04,0x04,
0xFF,0xFF,0x04,0x04,0x04,0x04,0x00,0x00,0x00,0x00,0x10,0x10,0x1F,0x1F,0x10,0x10,
0x10,0x00,0x00,0x00],#"f"
0x67:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x70,0xF8,
0x8C,0x04,0x04,0x8C,0xF8,0x74,0x04,0x0C,0x00,0x70,0x76,0xCF,0x8D,0x8D,0x8D,0x89,
0xC8,0x78,0x70,0x00],#"g"
0x68:
[0x00,0x20,0xE0,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,
0x08,0x04,0x04,0x04,0xFC,0xF8,0x00,0x00,0x00,0x10,0x1F,0x1F,0x10,0x00,0x00,0x10,
0x1F,0x1F,0x10,0x00],#"h"
0x69:
[0x00,0x00,0x00,0x00,0x00,0x60,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x04,
0x04,0xFC,0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x10,0x10,0x1F,0x1F,0x10,
0x10,0x10,0x00,0x00],#"i"
0x6a:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x04,0x04,0x04,0xFC,0xFC,0x00,0x00,0x00,0x00,0x00,0xC0,0xC0,0x80,0x80,0xC0,0x7F,
0x3F,0x00,0x00,0x00],#"j"
0x6b:
[0x00,0x20,0xE0,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,
0x80,0xC0,0xF4,0x1C,0x04,0x04,0x00,0x00,0x00,0x10,0x1F,0x1F,0x11,0x00,0x03,0x1F,
0x1C,0x10,0x10,0x00],#"k"
0x6c:
[0x00,0x00,0x20,0x20,0x20,0xE0,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x10,0x10,0x1F,0x1F,0x10,
0x10,0x10,0x00,0x00],#"l"
0x6d:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0xFC,0xFC,0x08,
0x04,0xFC,0xFC,0x08,0x04,0xFC,0xFC,0x00,0x10,0x1F,0x1F,0x10,0x00,0x1F,0x1F,0x10,
0x00,0x1F,0x1F,0x10],#"m"
0x6e:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0xFC,0xFC,
0x08,0x08,0x04,0x04,0xFC,0xF8,0x00,0x00,0x00,0x10,0x1F,0x1F,0x10,0x00,0x00,0x10,
0x1F,0x1F,0x10,0x00],#"n"
0x6f:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE0,0xF0,0x18,
0x0C,0x04,0x04,0x0C,0x18,0xF0,0xE0,0x00,0x00,0x03,0x0F,0x0C,0x10,0x10,0x10,0x10,
0x0C,0x0F,0x03,0x00],#"o"
0x70:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0xFC,0xFC,
0x08,0x04,0x04,0x04,0x0C,0xF8,0xF0,0x00,0x00,0x80,0xFF,0xFF,0x88,0x90,0x10,0x10,
0x1C,0x0F,0x03,0x00],#"p"
0x71:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE0,0xF8,0x1C,
0x04,0x04,0x04,0x08,0xF8,0xFC,0x00,0x00,0x00,0x03,0x0F,0x18,0x10,0x10,0x90,0x88,
0xFF,0xFF,0x80,0x00],#"q"
0x72:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x04,0x04,0xFC,
0xFC,0x10,0x08,0x04,0x04,0x0C,0x0C,0x00,0x10,0x10,0x10,0x1F,0x1F,0x10,0x10,0x10,
0x00,0x00,0x00,0x00],#"r"
0x73:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x78,
0xCC,0xC4,0x84,0x84,0x84,0x0C,0x1C,0x00,0x00,0x00,0x1E,0x18,0x10,0x10,0x10,0x11,
0x19,0x0F,0x06,0x00],#"s"
0x74:
[0x00,0x00,0x00,0x00,0x00,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x04,0x04,
0xFF,0xFF,0x04,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x1F,0x10,0x10,
0x10,0x0C,0x00,0x00],#"t"
0x75:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0xFC,0xFE,
0x00,0x00,0x00,0x04,0xFC,0xFE,0x00,0x00,0x00,0x00,0x0F,0x1F,0x18,0x10,0x10,0x08,
0x1F,0x0F,0x08,0x00],#"u"
0x76:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x0C,0x3C,
0xFC,0xC4,0x00,0x00,0xC4,0x3C,0x0C,0x04,0x00,0x00,0x00,0x00,0x01,0x0F,0x1E,0x0E,
0x01,0x00,0x00,0x00],#"v"
0x77:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x3C,0xFC,0xC4,
0x00,0xE4,0x7C,0xFC,0x84,0x80,0x7C,0x04,0x00,0x00,0x07,0x1F,0x07,0x00,0x00,0x07,
0x1F,0x07,0x00,0x00],#"w"
0x78:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x04,0x1C,
0x7C,0xE4,0xC0,0x34,0x1C,0x04,0x04,0x00,0x00,0x10,0x10,0x1C,0x16,0x01,0x13,0x1F,
0x1C,0x18,0x10,0x00],#"x"
0x79:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x0C,0x3C,
0xFC,0xC4,0x00,0xC4,0x3C,0x04,0x04,0x00,0x00,0x00,0xC0,0x80,0xC1,0x37,0x0E,0x01,
0x00,0x00,0x00,0x00],#"y"
0x7a:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1C,0x04,
0x04,0xC4,0xF4,0x7C,0x1C,0x04,0x00,0x00,0x00,0x00,0x10,0x1C,0x1F,0x17,0x11,0x10,
0x10,0x18,0x0E,0x00]#"z"
}
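# A minimal sketch of dumping one font24 glyph as ASCII art, assuming the usual
# SSD1306 page layout: bytes run column-major in three horizontal bands of
# 8 vertical pixels each (LSB = top row of a band), so the CJK glyphs above are
# 72 bytes (24x24 px) and the ASCII glyphs 36 bytes (12x24 px).
def show_glyph(code):
    data = font24[code]
    width = len(data) // 3            # 3 bands of `width` column-bytes each
    for band in range(3):
        for bit in range(8):          # row within this band
            row = ''
            for col in range(width):
                row += '#' if (data[band * width + col] >> bit) & 1 else '.'
            print(row)
# show_glyph(0x30)  # prints the 12x24 digit "0"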
| 51.125402
| 83
| 0.74
| 2,987
| 15,900
| 3.939739
| 0.066622
| 0.598912
| 0.676071
| 0.689327
| 0.731815
| 0.655337
| 0.582257
| 0.505778
| 0.431679
| 0.381883
| 0
| 0.53313
| 0.053648
| 15,900
| 310
| 84
| 51.290323
| 0.24882
| 0.013585
| 0
| 0.171053
| 0
| 0
| 0
| 0
| 0
| 1
| 0.749055
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
13ccc6cad11fffc26408777c1272f6491a2d9ce5
| 16,485
|
py
|
Python
|
google/cloud/servicedirectory/v1beta1/servicedirectory-v1beta1-py/google/cloud/servicedirectory_v1beta1/services/registration_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/servicedirectory/v1beta1/servicedirectory-v1beta1-py/google/cloud/servicedirectory_v1beta1/services/registration_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/servicedirectory/v1beta1/servicedirectory-v1beta1-py/google/cloud/servicedirectory_v1beta1/services/registration_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator
from google.cloud.servicedirectory_v1beta1.types import endpoint
from google.cloud.servicedirectory_v1beta1.types import namespace
from google.cloud.servicedirectory_v1beta1.types import registration_service
from google.cloud.servicedirectory_v1beta1.types import service
class ListNamespacesPager:
    """A pager for iterating through ``list_namespaces`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.servicedirectory_v1beta1.types.ListNamespacesResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``namespaces`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListNamespaces`` requests and continue to iterate
    through the ``namespaces`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.servicedirectory_v1beta1.types.ListNamespacesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(self,
                 method: Callable[..., registration_service.ListNamespacesResponse],
                 request: registration_service.ListNamespacesRequest,
                 response: registration_service.ListNamespacesResponse,
                 *,
                 metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.servicedirectory_v1beta1.types.ListNamespacesRequest):
                The initial request object.
            response (google.cloud.servicedirectory_v1beta1.types.ListNamespacesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = registration_service.ListNamespacesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[registration_service.ListNamespacesResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[namespace.Namespace]:
        for page in self.pages:
            yield from page.namespaces

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListNamespacesAsyncPager:
    """A pager for iterating through ``list_namespaces`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.servicedirectory_v1beta1.types.ListNamespacesResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``namespaces`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListNamespaces`` requests and continue to iterate
    through the ``namespaces`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.servicedirectory_v1beta1.types.ListNamespacesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(self,
                 method: Callable[..., Awaitable[registration_service.ListNamespacesResponse]],
                 request: registration_service.ListNamespacesRequest,
                 response: registration_service.ListNamespacesResponse,
                 *,
                 metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.servicedirectory_v1beta1.types.ListNamespacesRequest):
                The initial request object.
            response (google.cloud.servicedirectory_v1beta1.types.ListNamespacesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = registration_service.ListNamespacesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[registration_service.ListNamespacesResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[namespace.Namespace]:
        async def async_generator():
            async for page in self.pages:
                for response in page.namespaces:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListServicesPager:
    """A pager for iterating through ``list_services`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.servicedirectory_v1beta1.types.ListServicesResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``services`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListServices`` requests and continue to iterate
    through the ``services`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.servicedirectory_v1beta1.types.ListServicesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(self,
                 method: Callable[..., registration_service.ListServicesResponse],
                 request: registration_service.ListServicesRequest,
                 response: registration_service.ListServicesResponse,
                 *,
                 metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.servicedirectory_v1beta1.types.ListServicesRequest):
                The initial request object.
            response (google.cloud.servicedirectory_v1beta1.types.ListServicesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = registration_service.ListServicesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[registration_service.ListServicesResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[service.Service]:
        for page in self.pages:
            yield from page.services

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListServicesAsyncPager:
    """A pager for iterating through ``list_services`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.servicedirectory_v1beta1.types.ListServicesResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``services`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListServices`` requests and continue to iterate
    through the ``services`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.servicedirectory_v1beta1.types.ListServicesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(self,
                 method: Callable[..., Awaitable[registration_service.ListServicesResponse]],
                 request: registration_service.ListServicesRequest,
                 response: registration_service.ListServicesResponse,
                 *,
                 metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.servicedirectory_v1beta1.types.ListServicesRequest):
                The initial request object.
            response (google.cloud.servicedirectory_v1beta1.types.ListServicesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = registration_service.ListServicesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[registration_service.ListServicesResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[service.Service]:
        async def async_generator():
            async for page in self.pages:
                for response in page.services:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListEndpointsPager:
    """A pager for iterating through ``list_endpoints`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.servicedirectory_v1beta1.types.ListEndpointsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``endpoints`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListEndpoints`` requests and continue to iterate
    through the ``endpoints`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.servicedirectory_v1beta1.types.ListEndpointsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(self,
                 method: Callable[..., registration_service.ListEndpointsResponse],
                 request: registration_service.ListEndpointsRequest,
                 response: registration_service.ListEndpointsResponse,
                 *,
                 metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.servicedirectory_v1beta1.types.ListEndpointsRequest):
                The initial request object.
            response (google.cloud.servicedirectory_v1beta1.types.ListEndpointsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = registration_service.ListEndpointsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[registration_service.ListEndpointsResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[endpoint.Endpoint]:
        for page in self.pages:
            yield from page.endpoints

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListEndpointsAsyncPager:
    """A pager for iterating through ``list_endpoints`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.servicedirectory_v1beta1.types.ListEndpointsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``endpoints`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListEndpoints`` requests and continue to iterate
    through the ``endpoints`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.servicedirectory_v1beta1.types.ListEndpointsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(self,
                 method: Callable[..., Awaitable[registration_service.ListEndpointsResponse]],
                 request: registration_service.ListEndpointsRequest,
                 response: registration_service.ListEndpointsResponse,
                 *,
                 metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.servicedirectory_v1beta1.types.ListEndpointsRequest):
                The initial request object.
            response (google.cloud.servicedirectory_v1beta1.types.ListEndpointsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = registration_service.ListEndpointsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[registration_service.ListEndpointsResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[endpoint.Endpoint]:
        async def async_generator():
            async for page in self.pages:
                for response in page.endpoints:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
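# --- Usage sketch (illustrative, not part of the generated module) ---
# Assuming the public client from google.cloud.servicedirectory_v1beta1: the
# pagers above let callers treat the paginated RPCs as plain iterables,
# fetching follow-up pages lazily via next_page_token.
if __name__ == '__main__':
    from google.cloud import servicedirectory_v1beta1

    client = servicedirectory_v1beta1.RegistrationServiceClient()
    parent = 'projects/my-project/locations/us-east1'  # hypothetical resource path
    for ns in client.list_namespaces(parent=parent):   # ListNamespacesPager.__iter__
        print(ns.name)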
| 42.487113
| 95
| 0.682924
| 1,757
| 16,485
| 6.189528
| 0.103017
| 0.052966
| 0.069517
| 0.08754
| 0.922115
| 0.922115
| 0.922115
| 0.904092
| 0.89554
| 0.89554
| 0
| 0.006119
| 0.236639
| 16,485
| 387
| 96
| 42.596899
| 0.858074
| 0.460237
| 0
| 0.786585
| 0
| 0
| 0.00746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164634
| false
| 0
| 0.030488
| 0.073171
| 0.323171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b939ab11c953c2f8f929148816ae9e1a26464d52
| 25,959
|
py
|
Python
|
layers/ModConv2d.py
|
Egor-kokhan/StyleGANv2-genart-keras
|
64db59a8df7b61331a1c19aadc8a61219df97813
|
[
"MIT"
] | 1
|
2022-02-27T10:18:04.000Z
|
2022-02-27T10:18:04.000Z
|
layers/ModConv2d.py
|
Egor-kokhan/StyleGANv2-genart-keras
|
64db59a8df7b61331a1c19aadc8a61219df97813
|
[
"MIT"
] | 2
|
2021-04-11T12:44:59.000Z
|
2021-04-21T11:39:33.000Z
|
layers/ModConv2d.py
|
Egor-kokhan/StyleGANv2-genart-keras
|
64db59a8df7b61331a1c19aadc8a61219df97813
|
[
"MIT"
] | 1
|
2021-04-08T16:33:51.000Z
|
2021-04-08T16:33:51.000Z
|
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
from upfirdn_2d import *
from layers.other import Dense, normalize_2nd_moment
NOISE_STRENGTH = 0.001
# ToRGB block.
def torgb(x, y, latents, res_name, is_grouped, style_strength_map=None):  # res = 2..resolution_log2
    if not is_grouped:
        t = ModConv2d(rank=2, sampling=None, filters=3, kernel_size=1, demodulate=False, noise=True, act=None, name=res_name+'/ToRGB')([x, latents[0:1, -1]])
    else:
        t = ModConv2d_grouped(rank=2, sampling=None, filters=3, kernel_size=1, demodulate=False, noise=True, act=None, name=res_name+'/ToRGB')([x, latents])
        # collapse the per-style group axis, weighted by style_strength_map
        t = tf.reduce_sum(t * style_strength_map, axis=1)
    if y is not None:
        t += tf.cast(y, t.dtype)
    return t
class ModConv2d(Layer):
    """Abstract N-D convolution layer (private, used as implementation base).

    This layer creates a convolution kernel that is convolved
    (actually cross-correlated) with the layer input to produce a tensor of
    outputs. If `use_bias` is True (and a `bias_initializer` is provided),
    a bias vector is created and added to the outputs. Finally, if
    `activation` is not `None`, it is applied to the outputs as well.

    Arguments:
        rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
        filters: Integer, the dimensionality of the output space (i.e. the number
            of filters in the convolution).
        kernel_size: An integer or tuple/list of n integers, specifying the
            length of the convolution window.
        strides: An integer or tuple/list of n integers,
            specifying the stride length of the convolution.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
        data_format: A string, one of `channels_last` (default) or `channels_first`.
            The ordering of the dimensions in the inputs.
            `channels_last` corresponds to inputs with shape
            `(batch, ..., channels)` while `channels_first` corresponds to
            inputs with shape `(batch, channels, ...)`.
        dilation_rate: An integer or tuple/list of n integers, specifying
            the dilation rate to use for dilated convolution.
            Currently, specifying any `dilation_rate` value != 1 is
            incompatible with specifying any `strides` value != 1.
        activation: Activation function. Set it to None to maintain a
            linear activation.
        use_bias: Boolean, whether the layer uses a bias.
        kernel_initializer: An initializer for the convolution kernel.
        bias_initializer: An initializer for the bias vector. If None, the default
            initializer will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The function
            must take as input the unprojected variable and must return the
            projected variable (which must have the same shape). Constraints are
            not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
        trainable: Boolean, if `True` the weights of this layer will be marked as
            trainable (and listed in `layer.trainable_weights`).
        name: A string, the name of the layer.
    """
    def __init__(self, rank,
                 filters,
                 kernel_size,
                 sampling,  # one of [None, 'up', 'down']
                 strides=1,
                 act='lrelu',
                 noise=True,
                 demodulate=True,
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        super(ModConv2d, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank
        self.filters = filters
        self.noise = noise
        self.demodulate = demodulate
        self.act = act
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = [InputSpec(ndim=self.rank + 2), InputSpec(ndim=self.rank)]
        self.sampling = sampling
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape[0])
        input_channel = self._get_input_channel(input_shape)
        kernel_shape = self.kernel_size + (input_channel, self.filters)
        self.modulate_style = Dense(units=input_shape[-1], constant_b=0.0, act=None, name='mod_weight')
        self.noise_strength = self.add_weight(
            name='noise_strength',
            shape=1,
            initializer=tf.initializers.zeros(),
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=False,
            dtype=self.dtype)
        self.kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype)
        else:
            self.bias = None
        self.built = True
    def call(self, inputs):
        conv_inputs = inputs[0]
        style = inputs[1]
        weights = self.kernel
        # Equalized learning rate: scale weights by the He-init standard deviation
        he_std = 1.0 / tf.math.sqrt(tf.dtypes.cast(tf.math.reduce_prod(weights.shape[:-1]), tf.float32))
        runtime_coef = he_std * 1.0
        weights = weights * runtime_coef
        # Modulate: scale each input channel by the learned style
        style = self.modulate_style(style) + 1.0
        if self.demodulate:
            style *= 1 / tf.reduce_max(tf.abs(style))  # Pre-normalize to avoid float16 overflow.
        weights = weights * style[0, np.newaxis, np.newaxis, :, np.newaxis]
        # Demodulate
        if self.demodulate:
            d = tf.math.rsqrt(tf.math.reduce_sum(tf.math.square(weights), axis=[0, 1, 2]) + 1e-8)  # [BO] Scaling factor.
            weights *= d[np.newaxis, np.newaxis, np.newaxis, :]  # [BkkIO] Scale output feature maps.
        # Convolve
        padding = 0
        kernel = self.kernel_size[0]
        resample_kernel = [1, 3, 3, 1]
        data_format = 'NHWC'  # 'NCHW'
        if self.sampling == 'up':
            x = upsample_conv_2d(conv_inputs, weights, data_format=data_format, k=resample_kernel, padding=padding)
        elif self.sampling == 'down':
            x = conv_downsample_2d(conv_inputs, weights, data_format=data_format, k=resample_kernel, padding=padding)
        else:
            padding_mode = {0: 'SAME', -(kernel // 2): 'VALID'}[padding]
            x = tf.nn.conv2d(conv_inputs, weights, data_format=data_format, strides=[1, 1, 1, 1], padding=padding_mode)
        if self.noise:
            noise = tf.random.normal([tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1], dtype=x.dtype)
            x += noise * self.noise_strength * NOISE_STRENGTH
        x = nn.bias_add(x, self.bias, data_format=data_format)
        if self.act == 'lrelu':
            x = tf.nn.leaky_relu(x, alpha=0.2) * tf.math.sqrt(2.0)
        elif self.act == 'linear' or self.act is None:
            pass
        else:
            raise ValueError('Activation is unsupported.')
        return x
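    # Demodulation recap (StyleGAN2, Karras et al. 2020): with the style s applied
    # per input channel i as w'_{ijk} = s_i * w_{ijk}, the per-output-map norm is
    #     sigma_j = sqrt(sum_{i,k} (w'_{ijk})^2 + 1e-8)
    # and the demodulated weights are w''_{ijk} = w'_{ijk} / sigma_j, which is
    # exactly the reduce_sum/rsqrt step in call() above; the 1e-8 term guards
    # against division by zero for all-zero filters.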
    def compute_output_shape(self, input_shape):
        input_shape = input_shape[0]
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_last':
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return tensor_shape.TensorShape([input_shape[0]] + new_space +
                                            [self.filters])
        else:
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return tensor_shape.TensorShape([input_shape[0], self.filters] +
                                            new_space)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(ModConv2d, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return 1
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
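# Usage sketch: a minimal, hedged example of driving the modulated convolution
# above. It assumes ModConv2d shares the constructor signature shown for
# ModConv2d_grouped below, and that the custom Dense, the upsample/downsample
# helpers and the NOISE_STRENGTH global defined earlier in this file are in
# scope. All shapes and hyperparameters are illustrative placeholders.
def _modconv2d_example():
    feature_map = tf.random.normal([4, 16, 16, 64])  # NHWC activations
    style = tf.random.normal([4, 64])                # per-sample style vectors
    layer = ModConv2d(rank=2, filters=128, kernel_size=3, sampling=None)
    # The layer consumes [activations, style] and returns modulated activations.
    return layer([feature_map, style])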
class ModConv2d_grouped(Layer):
"""Abstract N-D convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self, rank,
filters,
kernel_size,
sampling, # [None, 'up', 'down']
strides=1,
act='lrelu',
noise=True,
demodulate=True,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(ModConv2d_grouped, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
self.filters = filters
self.noise = noise
self.demodulate = demodulate
self.act = act
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = [InputSpec(ndim=self.rank + 2), InputSpec(ndim=self.rank + 1)]
self.sampling = sampling
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape[0])
input_channel = self._get_input_channel(input_shape)
kernel_shape = self.kernel_size + (input_channel, self.filters)
self.modulate_style = Dense(units=input_shape[-1], constant_b=0.0, act=None, name='mod_weight')
self.noise_strength = self.add_weight(
name='noise_strength',
shape=1,
initializer=tf.initializers.zeros(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=False,
dtype=self.dtype)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
conv_inputs = inputs[0]
style = inputs[1][0]
weights = self.kernel[np.newaxis]
# print(f"conv_inputs: {conv_inputs}, style: {style}, weights: {weights}")
he_std = 1.0 / tf.math.sqrt(tf.dtypes.cast(tf.math.reduce_prod(weights.shape[:-1]), tf.float32))
runtime_coef = he_std * 1.0
weights = weights*runtime_coef
# Modulate.
style = self.modulate_style(style) + 1.0
if self.demodulate:
style *= 1 / tf.reduce_max(tf.abs(style), axis=1, keepdims=True) # Pre-normalize to avoid float16 overflow.
weights = weights*style[:, np.newaxis, np.newaxis, :, np.newaxis]
# Demodulate
if self.demodulate:
d = tf.math.rsqrt(tf.math.reduce_sum(tf.math.square(weights), axis=[1, 2, 3], keepdims=True) + 1e-8) # [BO] Scaling factor.
weights *= d # [BkkIO] Scale output feature maps.
# print("conv_inputs before reshaping", conv_inputs)
# conv_inputs = tf.reshape(conv_inputs, [1, -1, conv_inputs.shape[2], conv_inputs.shape[3]]) # Fused => reshape minibatch to convolution groups.
# print("conv_inputs after reshaping", conv_inputs)
# print('weights before reshaping: ', weights)
weights = tf.reshape(tf.transpose(weights, [1, 2, 3, 0, 4]), [weights.shape[1], weights.shape[2], weights.shape[3], -1])
# print('weights after reshaping: ', weights)
# Convolve
padding = 0
kernel = self.kernel_size[0]
resample_kernel = [1,3,3,1]
data_format = 'NHWC' #'NCHW'
if self.sampling == 'up':
x = upsample_conv_2d_grouped(conv_inputs, weights, data_format=data_format, k=resample_kernel, padding=padding)
else:
padding_mode = {0: 'SAME', -(kernel // 2): 'VALID'}[padding]  # for 1x1 kernels both keys are 0 and VALID wins, which equals SAME there
x = tf.nn.conv2d(conv_inputs, weights, data_format=data_format, strides=[1, 1, 1, 1], padding=padding_mode)
out_shape = [-1,
inputs[0].shape[1] * 2 if self.sampling == 'up' else inputs[0].shape[1],
inputs[0].shape[2] * 2 if self.sampling == 'up' else inputs[0].shape[2],
style.shape[0],
self.filters,
]
x = tf.reshape(x, out_shape) # Fused => reshape convolution groups back to minibatch.
x = tf.transpose(x, [0, 3, 1, 2, 4])  # -> [batch, styles, H, W, filters]
if self.noise:
noise = tf.random.normal([tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1, 1], dtype=x.dtype)
x += noise*self.noise_strength*NOISE_STRENGTH
x = nn.bias_add(x, self.bias, data_format=data_format)
if self.act == 'lrelu':
x = tf.nn.leaky_relu(x, alpha=0.2)*tf.math.sqrt(2.0)
elif self.act == 'linear' or self.act is None:
pass
else:
raise ValueError('Unsupported activation: {!r}'.format(self.act))
return x
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0]] + new_space +
[self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0], self.filters] +
new_space)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(ModConv2d_grouped, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return 1
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
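# Usage sketch for the grouped variant: a hedged, illustrative call assuming
# upsample_conv_2d_grouped and NOISE_STRENGTH from earlier in this file. The
# grouped layer expects a 3-D style tensor (it indexes inputs[1][0]) and
# returns a 5-D tensor shaped [batch, styles, H, W, filters]; all sizes below
# are placeholder values.
def _modconv2d_grouped_example():
    feature_map = tf.random.normal([2, 16, 16, 64])   # NHWC activations
    styles = tf.random.normal([1, 3, 64])             # one batch of 3 style rows
    layer = ModConv2d_grouped(rank=2, filters=128, kernel_size=3, sampling=None)
    return layer([feature_map, styles])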
| 42.625616
| 157
| 0.659656
| 3,287
| 25,959
| 5.048068
| 0.099483
| 0.024107
| 0.025312
| 0.0226
| 0.91177
| 0.885614
| 0.86657
| 0.847526
| 0.847526
| 0.83716
| 0
| 0.011661
| 0.233599
| 25,959
| 608
| 158
| 42.695724
| 0.822367
| 0.261797
| 0
| 0.834483
| 0
| 0
| 0.051752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043678
| false
| 0.004598
| 0.057471
| 0
| 0.149425
| 0.002299
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b9702a06b9ed7fc4d9a817c2f64d9dd97f558e62
| 2,572
|
py
|
Python
|
weblogic/datadog_checks/weblogic/config_models/defaults.py
|
kjmadscience/integrations-core
|
663bdf44730dd6c9f3565c121318b320bfcb4988
|
[
"BSD-3-Clause"
] | null | null | null |
weblogic/datadog_checks/weblogic/config_models/defaults.py
|
kjmadscience/integrations-core
|
663bdf44730dd6c9f3565c121318b320bfcb4988
|
[
"BSD-3-Clause"
] | null | null | null |
weblogic/datadog_checks/weblogic/config_models/defaults.py
|
kjmadscience/integrations-core
|
663bdf44730dd6c9f3565c121318b320bfcb4988
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_collect_default_metrics(field, value):
return False
def shared_conf(field, value):
return get_default_field_value(field, value)
def shared_new_gc_metrics(field, value):
return False
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_service_check_prefix(field, value):
return get_default_field_value(field, value)
def instance_collect_default_jvm_metrics(field, value):
return True
def instance_empty_default_hostname(field, value):
return False
def instance_is_jmx(field, value):
return False
def instance_java_bin_path(field, value):
return get_default_field_value(field, value)
def instance_java_options(field, value):
return get_default_field_value(field, value)
def instance_jmx_url(field, value):
return get_default_field_value(field, value)
def instance_key_store_password(field, value):
return get_default_field_value(field, value)
def instance_key_store_path(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_name(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_process_name_regex(field, value):
return get_default_field_value(field, value)
def instance_rmi_client_timeout(field, value):
return 15000
def instance_rmi_connection_timeout(field, value):
return 20000
def instance_rmi_registry_ssl(field, value):
return False
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_tools_jar_path(field, value):
return get_default_field_value(field, value)
def instance_trust_store_password(field, value):
return get_default_field_value(field, value)
def instance_trust_store_path(field, value):
return get_default_field_value(field, value)
def instance_user(field, value):
return get_default_field_value(field, value)
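# Hedged sketch (not part of the generated file): every default callback above
# follows the same (field, value) -> default contract that the config-model
# validators consume. The two calls below touch only constant defaults, so
# passing None for both arguments is safe here.
if __name__ == '__main__':
    assert instance_min_collection_interval(None, None) == 15
    assert shared_collect_default_metrics(None, None) is False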
| 22.365217
| 105
| 0.783437
| 368
| 2,572
| 5.152174
| 0.269022
| 0.32173
| 0.219409
| 0.189873
| 0.640295
| 0.640295
| 0.589662
| 0.550633
| 0.550633
| 0.526371
| 0
| 0.00772
| 0.143857
| 2,572
| 114
| 106
| 22.561404
| 0.853315
| 0.132193
| 0
| 0.415094
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.490566
| false
| 0.056604
| 0.018868
| 0.490566
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 8
|
b9768f71ceac6b148aebf8ed5448843fbff65b92
| 5,670
|
py
|
Python
|
smart_purchase/smart_purchase/doctype/smart_purchase/smart_purchase.py
|
hrgadeha/sp
|
374902a7fba3c0a26fbaf79592fd15b70f7d7187
|
[
"MIT"
] | null | null | null |
smart_purchase/smart_purchase/doctype/smart_purchase/smart_purchase.py
|
hrgadeha/sp
|
374902a7fba3c0a26fbaf79592fd15b70f7d7187
|
[
"MIT"
] | null | null | null |
smart_purchase/smart_purchase/doctype/smart_purchase/smart_purchase.py
|
hrgadeha/sp
|
374902a7fba3c0a26fbaf79592fd15b70f7d7187
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
from datetime import date
from datetime import datetime, timedelta
from frappe import msgprint
from frappe.model.document import Document
class SmartPurchase(Document):
def on_submit(self):
if self.order_for == "All Items From Table":
items = []
for i in self.items:
item_li = {"item_code": i.item_code,"qty": i.qty,"rate": i.rate,"amount":i.amount,"stock_uom":i.uom,"schedule_date":date.today(),"material_request": i.mr,"material_request_item": i.mri}
items.append(item_li)
purchase_order = frappe.get_doc({
"doctype": "Purchase Order",
"supplier": self.supplier,
"transaction_date": date.today(),
"schedule_date": date.today(),
"set_warehouse": self.for_warehouse,
"items": items
})
purchase_order.insert(ignore_permissions=True)
purchase_order.save()
msgprint("Purchase Order Created")
if self.order_for == "Selected Items From Table":
selected_items = []
for i in self.items:
if i.use_this == 1:
item_li = {"item_code": i.item_code,"qty": i.qty,"rate": i.rate,"amount":i.amount,"stock_uom":i.uom,"schedule_date":date.today(),"material_request": i.mr,"material_request_item": i.mri}
selected_items.append(item_li)
purchase_order = frappe.get_doc({
"doctype": "Purchase Order",
"supplier": self.supplier,
"transaction_date": date.today(),
"schedule_date": date.today(),
"set_warehouse": self.for_warehouse,
"items": selected_items
})
purchase_order.insert(ignore_permissions=True)
purchase_order.save()
msgprint("Purchase Order Created")
@frappe.whitelist(allow_guest=True)
def insert_data_only_group(item_group,from_date,to_date):
mt = frappe.db.sql("""select mri.item_code, (mri.qty - mri.ordered_qty), mri.stock_uom, mri.rate, mri.amount,
mri.item_name,mri.description,mri.item_group,mri.brand,mri.parent, mri.name
from `tabMaterial Request` mr, `tabMaterial Request Item` mri where (mri.ordered_qty != mri.qty) and
mr.docstatus = 1 and mri.parent = mr.name and mri.unused = 0 and mri.item_group = %s
and (mr.schedule_date between %s and %s);""",(item_group,from_date,to_date),as_list=1)
return mt
@frappe.whitelist(allow_guest=True)
def insert_data_with_brand_group(item_group,brand,from_date,to_date):
mt = frappe.db.sql("""select mri.item_code, (mri.qty - mri.ordered_qty), mri.stock_uom, mri.rate, mri.amount,
mri.item_name,mri.description,mri.item_group,mri.brand,mri.parent, mri.name
from `tabMaterial Request` mr, `tabMaterial Request Item` mri where (mri.ordered_qty != mri.qty) and
mr.docstatus = 1 and mri.parent = mr.name and mri.unused = 0 and
mri.item_group = %s and mri.brand = %s
and (mr.schedule_date between %s and %s);""",(item_group,brand,from_date,to_date),as_list=1)
return mt
@frappe.whitelist(allow_guest=True)
def insert_data_all(item_group,brand,item_code,from_date,to_date):
mt = frappe.db.sql("""select mri.item_code, (mri.qty - mri.ordered_qty), mri.stock_uom, mri.rate, mri.amount,
mri.item_name,mri.description,mri.item_group,mri.brand,mri.parent, mri.name
from `tabMaterial Request` mr, `tabMaterial Request Item` mri where (mri.ordered_qty != mri.qty) and
mr.docstatus = 1 and mri.parent = mr.name and mri.unused = 0
and mri.item_group = %s and mri.brand = %s and mri.item_code = %s
and (mr.schedule_date between %s and %s);""",(item_group,brand,item_code,from_date,to_date),as_list=1)
return mt
@frappe.whitelist(allow_guest=True)
def insert_data_only_brand(brand,from_date,to_date):
mt = frappe.db.sql("""select mri.item_code, (mri.qty - mri.ordered_qty), mri.stock_uom, mri.rate, mri.amount,
mri.item_name,mri.description,mri.item_group,mri.brand,mri.parent, mri.name
from `tabMaterial Request` mr, `tabMaterial Request Item` mri where (mri.ordered_qty != mri.qty) and
mr.docstatus = 1 and mri.parent = mr.name and mri.unused = 0 and mri.brand = %s
and (mr.schedule_date between %s and %s);""",(brand,from_date,to_date),as_list=1)
return mt
@frappe.whitelist(allow_guest=True)
def insert_data_brand_item(brand,item_code,from_date,to_date):
mt = frappe.db.sql("""select mri.item_code, (mri.qty - mri.ordered_qty), mri.stock_uom, mri.rate, mri.amount,
mri.item_name,mri.description,mri.item_group,mri.brand,mri.parent, mri.name
from `tabMaterial Request` mr, `tabMaterial Request Item` mri where (mri.ordered_qty != mri.qty) and
mr.docstatus = 1 and mri.parent = mr.name and mri.unused = 0
and mri.brand = %s and mri.item_code = %s
and (mr.schedule_date between %s and %s);""",(brand,item_code,from_date,to_date),as_list=1)
return mt
@frappe.whitelist(allow_guest=True)
def insert_data_only_item(item_code,from_date,to_date):
mt = frappe.db.sql("""select mri.item_code, (mri.qty - mri.ordered_qty), mri.stock_uom, mri.rate, mri.amount,
mri.item_name,mri.description,mri.item_group,mri.brand, mri.parent, mri.name
from `tabMaterial Request` mr, `tabMaterial Request Item` mri where (mri.ordered_qty != mri.qty) and
mr.docstatus = 1 and mri.parent = mr.name and mri.unused = 0 and mri.item_code = %s
and (mr.schedule_date between %s and %s);""",(item_code,from_date,to_date),as_list=1)
return mt
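# Hedged usage sketch (not part of the original app): the whitelisted query
# helpers above can be exercised from a `bench console` session. The item
# group and date strings below are made-up placeholder filters.
def _pending_items_example():
    rows = insert_data_only_group("Raw Material", "2021-01-01", "2021-12-31")
    for row in rows:
        # row[0] is the item_code, row[1] the still-unordered qty, row[2] the UOM
        print(row[0], row[1], row[2])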
| 53.490566
| 190
| 0.667196
| 843
| 5,670
| 4.29656
| 0.103203
| 0.046383
| 0.033131
| 0.046383
| 0.905025
| 0.905025
| 0.891496
| 0.889012
| 0.889012
| 0.870514
| 0
| 0.004216
| 0.205115
| 5,670
| 105
| 191
| 54
| 0.799423
| 0
| 0
| 0.608696
| 0
| 0.23913
| 0.585714
| 0.075309
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076087
| false
| 0
| 0.065217
| 0
| 0.217391
| 0.032609
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b9f8728e6d7557630d0e645684f34330c3d10fda
| 6,098
|
py
|
Python
|
Tensile/Tests/extended/convolution_config/test_conv_vs_contraction.py
|
micmelesse/Tensile
|
62fb9a16909ddef08010915cfefe4c0341f48daa
|
[
"MIT"
] | 1
|
2021-12-03T09:42:10.000Z
|
2021-12-03T09:42:10.000Z
|
Tensile/Tests/extended/convolution_config/test_conv_vs_contraction.py
|
micmelesse/Tensile
|
62fb9a16909ddef08010915cfefe4c0341f48daa
|
[
"MIT"
] | 1
|
2020-06-22T19:28:26.000Z
|
2020-06-22T19:28:26.000Z
|
Tensile/Tests/extended/convolution_config/test_conv_vs_contraction.py
|
micmelesse/Tensile
|
62fb9a16909ddef08010915cfefe4c0341f48daa
|
[
"MIT"
] | null | null | null |
import logging
import pytest
from Tensile.SolutionStructs import Convolution
log = logging.getLogger("testlog")
"""
These tests run the convolution-vs-contraction mode always
"""
def test_simple(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '1x1',
'Stride': '1x1',
'Dilation': '1x1',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
def test_stride1x2(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '1x1',
'Stride': '1x2',
'Dilation': '1x1',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
@pytest.mark.skip(reason="dilationY breaks conv reference model")
def test_stride2x1(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '1x1',
'Stride': '2x1',
'Dilation': '1x1',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
@pytest.mark.skip(reason="dilationY breaks conv reference model")
def test_stride2x3(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '1x1',
'Stride': '2x3',
'Dilation': '1x1',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
def test_filter1x2(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '1x2',
'Stride': '1x1',
'Dilation': '1x1',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
def test_filter2x1(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '2x1',
'Stride': '1x1',
'Dilation': '1x1',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
def test_filter2x3(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '2x3',
'Stride': '1x1',
'Dilation': '1x1',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
def test_dilation1x2(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '1x1',
'Stride': '1x1',
'Dilation': '1x2',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
@pytest.mark.skip(reason="dilationY breaks conv reference model")
def test_dilation2x1(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '1x1',
'Stride': '1x1',
'Dilation': '2x1',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
@pytest.mark.skip(reason="dilationY breaks conv reference model")
def test_dilation2x3(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '1x1',
'Stride': '1x1',
'Dilation': '2x3',
'Spatial': '17x31',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
def test_filter_stride_dilation_0(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'TensorBFormat': 'KCYX',
'TensorDFormat': 'NCHW',
'UnrollOnChannel': 0,
'Filter': '2x3',
'Stride': '2x3',
'Dilation': '2x3',
'Spatial': '17x31',
})
assert(z['NumIndicesC']==4)
assert(z['IndexAssignmentsA']==[6,5, 0,1, 4,3])
assert(z['IndexAssignmentsB']==[6,5, 4, 2, 3])
assert(z['UseInitialStridesAB'])
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
def test_filter_stride_dilation_1(run_convolution_vs_contraction):
z={} # problemType definition
conv = Convolution(z, 'ConvolutionForward',
config={'TensorAFormat': 'NCHW',
'Filter': '6x7',
'Stride': '2x3',
'Dilation': '4x5',
'Spatial': '27x51',
})
log.debug(conv.printUsage(z))
run_convolution_vs_contraction(conv)
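# Hedged sketch (not one of the original tests): the Convolution constructor
# fills in the problemType dict passed as `z`, so it can be used standalone to
# inspect the contraction mapping; the config reuses only values already seen
# in this file.
def example_inspect_problem_type():
    z = {}
    conv = Convolution(z, 'ConvolutionForward',
                       config={'TensorAFormat': 'NCHW',
                               'Filter': '2x3',
                               'Stride': '1x1',
                               'Dilation': '1x1',
                               'Spatial': '17x31',
                               })
    log.debug(conv.printUsage(z))
    return z  # e.g. z['NumIndicesC'], z['IndexAssignmentsA'], ...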
| 36.73494
| 66
| 0.528862
| 513
| 6,098
| 6.111111
| 0.146199
| 0.103668
| 0.191388
| 0.206699
| 0.841467
| 0.841467
| 0.841467
| 0.841467
| 0.841467
| 0.841467
| 0
| 0.038327
| 0.345359
| 6,098
| 165
| 67
| 36.957576
| 0.746994
| 0.045097
| 0
| 0.787671
| 0
| 0
| 0.205432
| 0
| 0
| 0
| 0
| 0
| 0.027397
| 1
| 0.082192
| false
| 0
| 0.013699
| 0
| 0.09589
| 0.082192
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6a08234512c4c7c6273ac2fef240da01b52d7d59
| 17,635
|
bzl
|
Python
|
dotnet/private/deps/nunit.bzl
|
Saqoosha/rules_dotnet
|
4311b84a47e7850293aba9207534ad49056e9a2d
|
[
"Apache-2.0"
] | 1
|
2021-12-24T14:21:43.000Z
|
2021-12-24T14:21:43.000Z
|
dotnet/private/deps/nunit.bzl
|
Saqoosha/rules_dotnet
|
4311b84a47e7850293aba9207534ad49056e9a2d
|
[
"Apache-2.0"
] | null | null | null |
dotnet/private/deps/nunit.bzl
|
Saqoosha/rules_dotnet
|
4311b84a47e7850293aba9207534ad49056e9a2d
|
[
"Apache-2.0"
] | 1
|
2020-01-29T15:22:44.000Z
|
2020-01-29T15:22:44.000Z
|
load("@io_bazel_rules_dotnet//dotnet/private:rules/nuget.bzl", "nuget_package")
def dotnet_repositories_nunit():
### Generated by the tool
nuget_package(
name = "nunit",
package = "nunit",
version = "3.12.0",
sha256 = "62b67516a08951a20b12b02e5d20b5045edbb687c3aabe9170286ec5bb9000a1",
core_lib = {
"netcoreapp2.0": "lib/netstandard2.0/nunit.framework.dll",
"netcoreapp2.1": "lib/netstandard2.0/nunit.framework.dll",
},
net_lib = {
"net45": "lib/net45/nunit.framework.dll",
"net451": "lib/net45/nunit.framework.dll",
"net452": "lib/net45/nunit.framework.dll",
"net46": "lib/net45/nunit.framework.dll",
"net461": "lib/net45/nunit.framework.dll",
"net462": "lib/net45/nunit.framework.dll",
"net47": "lib/net45/nunit.framework.dll",
"net471": "lib/net45/nunit.framework.dll",
"net472": "lib/net45/nunit.framework.dll",
"netstandard1.4": "lib/netstandard1.4/nunit.framework.dll",
"netstandard1.5": "lib/netstandard1.4/nunit.framework.dll",
"netstandard1.6": "lib/netstandard1.4/nunit.framework.dll",
"netstandard2.0": "lib/netstandard2.0/nunit.framework.dll",
},
mono_lib = "lib/net45/nunit.framework.dll",
core_files = {
"netcoreapp2.0": [
"lib/netstandard2.0/nunit.framework.dll",
"lib/netstandard2.0/nunit.framework.pdb",
"lib/netstandard2.0/nunit.framework.xml",
],
"netcoreapp2.1": [
"lib/netstandard2.0/nunit.framework.dll",
"lib/netstandard2.0/nunit.framework.pdb",
"lib/netstandard2.0/nunit.framework.xml",
],
},
net_files = {
"net45": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"net451": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"net452": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"net46": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"net461": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"net462": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"net47": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"net471": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"net472": [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
"netstandard1.4": [
"lib/netstandard1.4/nunit.framework.dll",
"lib/netstandard1.4/nunit.framework.pdb",
"lib/netstandard1.4/nunit.framework.xml",
],
"netstandard1.5": [
"lib/netstandard1.4/nunit.framework.dll",
"lib/netstandard1.4/nunit.framework.pdb",
"lib/netstandard1.4/nunit.framework.xml",
],
"netstandard1.6": [
"lib/netstandard1.4/nunit.framework.dll",
"lib/netstandard1.4/nunit.framework.pdb",
"lib/netstandard1.4/nunit.framework.xml",
],
"netstandard2.0": [
"lib/netstandard2.0/nunit.framework.dll",
"lib/netstandard2.0/nunit.framework.pdb",
"lib/netstandard2.0/nunit.framework.xml",
],
},
mono_files = [
"lib/net45/nunit.framework.dll",
"lib/net45/nunit.framework.pdb",
"lib/net45/nunit.framework.xml",
],
)
nuget_package(
name = "nunit.consolerunner",
package = "nunit.consolerunner",
version = "3.10.0",
sha256 = "e852dad9a2ec1bd3ee48f3a6be68c7e2322582eaee710c439092c32087f49e84",
core_lib = {
"netcoreapp2.0": "tools/Mono.Cecil.dll",
"netcoreapp2.1": "tools/Mono.Cecil.dll",
},
net_lib = {
"net45": "tools/Mono.Cecil.dll",
"net451": "tools/Mono.Cecil.dll",
"net452": "tools/Mono.Cecil.dll",
"net46": "tools/Mono.Cecil.dll",
"net461": "tools/Mono.Cecil.dll",
"net462": "tools/Mono.Cecil.dll",
"net47": "tools/Mono.Cecil.dll",
"net471": "tools/Mono.Cecil.dll",
"net472": "tools/Mono.Cecil.dll",
"netstandard1.0": "tools/Mono.Cecil.dll",
"netstandard1.1": "tools/Mono.Cecil.dll",
"netstandard1.2": "tools/Mono.Cecil.dll",
"netstandard1.3": "tools/Mono.Cecil.dll",
"netstandard1.4": "tools/Mono.Cecil.dll",
"netstandard1.5": "tools/Mono.Cecil.dll",
"netstandard1.6": "tools/Mono.Cecil.dll",
"netstandard2.0": "tools/Mono.Cecil.dll",
},
mono_lib = "tools/Mono.Cecil.dll",
core_tool = {
"netcoreapp2.0": "tools/nunit3-console.exe",
"netcoreapp2.1": "tools/nunit3-console.exe",
},
net_tool = {
"net45": "tools/nunit3-console.exe",
"net451": "tools/nunit3-console.exe",
"net452": "tools/nunit3-console.exe",
"net46": "tools/nunit3-console.exe",
"net461": "tools/nunit3-console.exe",
"net462": "tools/nunit3-console.exe",
"net47": "tools/nunit3-console.exe",
"net471": "tools/nunit3-console.exe",
"net472": "tools/nunit3-console.exe",
"netstandard1.0": "tools/nunit3-console.exe",
"netstandard1.1": "tools/nunit3-console.exe",
"netstandard1.2": "tools/nunit3-console.exe",
"netstandard1.3": "tools/nunit3-console.exe",
"netstandard1.4": "tools/nunit3-console.exe",
"netstandard1.5": "tools/nunit3-console.exe",
"netstandard1.6": "tools/nunit3-console.exe",
"netstandard2.0": "tools/nunit3-console.exe",
},
mono_tool = "tools/nunit3-console.exe",
core_files = {
"netcoreapp2.0": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netcoreapp2.1": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
},
net_files = {
"net45": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"net451": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"net452": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"net46": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"net461": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"net462": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"net47": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"net471": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"net472": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netstandard1.0": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netstandard1.1": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netstandard1.2": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netstandard1.3": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netstandard1.4": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netstandard1.5": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netstandard1.6": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
"netstandard2.0": [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
},
mono_files = [
"tools/Mono.Cecil.dll",
"tools/nunit-agent-x86.exe",
"tools/nunit-agent-x86.exe.config",
"tools/nunit-agent.exe",
"tools/nunit-agent.exe.config",
"tools/nunit.engine.api.dll",
"tools/nunit.engine.api.xml",
"tools/nunit.engine.dll",
"tools/nunit.nuget.addins",
"tools/nunit3-console.exe",
"tools/nunit3-console.exe.config",
],
)
### End of generated by the tool
return
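# Hedged usage sketch (not part of the generated file): a consuming WORKSPACE
# would typically load this macro once so that the `nunit` and
# `nunit.consolerunner` packages become available as external repositories.
# The label assumes this file keeps its in-repo path dotnet/private/deps/nunit.bzl.
#
# load("@io_bazel_rules_dotnet//dotnet/private:deps/nunit.bzl", "dotnet_repositories_nunit")
# dotnet_repositories_nunit()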
| 40.54023
| 84
| 0.490388
| 1,736
| 17,635
| 4.968318
| 0.038018
| 0.185507
| 0.13913
| 0.146087
| 0.905043
| 0.837913
| 0.760928
| 0.760928
| 0.723362
| 0.723362
| 0
| 0.049996
| 0.356904
| 17,635
| 434
| 85
| 40.633641
| 0.710519
| 0.002835
| 0
| 0.802784
| 1
| 0
| 0.547275
| 0.450222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00232
| true
| 0
| 0
| 0
| 0.00464
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e007e4df5a59ad3bbc1ac232a6f3a3f61908e80f
| 1,775
|
py
|
Python
|
catkin_ws/build/fetch_gazebo/fetchit_challenge/cmake/fetchit_challenge-genmsg-context.py
|
RHolmewood/FetchRobot_Project2
|
c096dd4bf88691d893010e95074f5c53baac37bc
|
[
"MIT"
] | null | null | null |
catkin_ws/build/fetch_gazebo/fetchit_challenge/cmake/fetchit_challenge-genmsg-context.py
|
RHolmewood/FetchRobot_Project2
|
c096dd4bf88691d893010e95074f5c53baac37bc
|
[
"MIT"
] | null | null | null |
catkin_ws/build/fetch_gazebo/fetchit_challenge/cmake/fetchit_challenge-genmsg-context.py
|
RHolmewood/FetchRobot_Project2
|
c096dd4bf88691d893010e95074f5c53baac37bc
|
[
"MIT"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SchunkMachineAction.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SchunkMachineActionGoal.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SchunkMachineActionResult.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SchunkMachineActionFeedback.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SchunkMachineGoal.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SchunkMachineResult.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SchunkMachineFeedback.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SickCameraAction.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SickCameraActionGoal.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SickCameraActionResult.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SickCameraActionFeedback.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SickCameraGoal.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SickCameraResult.msg;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg/SickCameraFeedback.msg"
services_str = ""
pkg_name = "fetchit_challenge"
dependencies_str = "actionlib_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "fetchit_challenge;/home/lachlan/catkin_ws/devel/share/fetchit_challenge/msg;actionlib_msgs;/opt/ros/melodic/share/actionlib_msgs/cmake/../msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 147.916667
| 1,179
| 0.849014
| 241
| 1,775
| 6.033195
| 0.26971
| 0.18707
| 0.175378
| 0.196011
| 0.55227
| 0.522008
| 0.522008
| 0.522008
| 0.522008
| 0.455983
| 0
| 0.000574
| 0.019155
| 1,775
| 11
| 1,180
| 161.363636
| 0.834578
| 0.027606
| 0
| 0
| 1
| 0.222222
| 0.882251
| 0.852668
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e01c0416c89d7be58a231c8f0806e95636663854
| 16,164
|
py
|
Python
|
ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | 3
|
2016-12-01T15:55:11.000Z
|
2016-12-01T15:56:38.000Z
|
ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
@patch("resource_management.libraries.functions.get_hdp_version", new=MagicMock(return_value="2.3.0.0-1597"))
class TestJobHistoryServer(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
STACK_VERSION = "2.2"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
def test_configure_default(self, copy_to_hdfs_mock):
copy_to_hdfs_mock.return_value = True
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/job_history_server.py",
classname = "JobHistoryServer",
command = "configure",
config_file="default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
def test_start_default(self, copy_to_hdfs_mock):
copy_to_hdfs_mock.return_value = True
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/job_history_server.py",
classname = "JobHistoryServer",
command = "start",
config_file="default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
)
self.assertResourceCalled('Execute', '/usr/hdp/current/spark-client/sbin/start-history-server.sh',
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = 'ls /var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid >/dev/null 2>&1 && ps -p `cat /var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid` >/dev/null 2>&1',
user = 'spark',
)
self.assertNoMoreResources()
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/job_history_server.py",
classname = "JobHistoryServer",
command = "stop",
config_file="default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/hdp/current/spark-client/sbin/stop-history-server.sh',
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
user = 'spark',
)
self.assertResourceCalled('File', '/var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/job_history_server.py",
classname = "JobHistoryServer",
command = "configure",
config_file="secured.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertNoMoreResources()
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
def test_start_secured(self, copy_to_hdfs_mock):
copy_to_hdfs_mock.return_value = True
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/job_history_server.py",
classname = "JobHistoryServer",
command = "start",
config_file="secured.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/spark.service.keytab spark/localhost@EXAMPLE.COM; ',
user = 'spark',
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
action=['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
default_fs= UnknownConfigurationMock(),
hadoop_bin_dir='/usr/hdp/current/hadoop-client/bin',
hadoop_conf_dir='/usr/hdp/current/hadoop-client/conf',
hdfs_site=UnknownConfigurationMock(),
keytab=UnknownConfigurationMock(),
kinit_path_local='/usr/bin/kinit',
principal_name=UnknownConfigurationMock(),
security_enabled=True,
dfs_type = '',
user=UnknownConfigurationMock()
)
self.assertResourceCalled('Execute', '/usr/hdp/current/spark-client/sbin/start-history-server.sh',
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = 'ls /var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid >/dev/null 2>&1 && ps -p `cat /var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid` >/dev/null 2>&1',
user = 'spark',
)
self.assertNoMoreResources()
def test_stop_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/job_history_server.py",
classname = "JobHistoryServer",
command = "stop",
config_file="secured.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/hdp/current/spark-client/sbin/stop-history-server.sh',
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
user = 'spark',
)
self.assertResourceCalled('File', '/var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def assert_configure_default(self):
self.assertResourceCalled('Directory', '/var/run/spark',
owner = 'spark',
group = 'hadoop',
recursive = True,
mode = 0775
)
self.assertResourceCalled('Directory', '/var/log/spark',
owner = 'spark',
group = 'hadoop',
recursive = True,
mode = 0775
)
self.assertResourceCalled('HdfsResource', '/user/spark',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
dfs_type = '',
owner = 'spark',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
mode = 0775,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
)
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
owner = 'spark',
key_value_delimiter = ' ',
group = 'spark',
properties = self.getConfig()['configurations']['spark-defaults'],
)
self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['spark-env']['content']),
owner = 'spark',
group = 'spark',
mode = 0644,
)
self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/log4j.properties',
content = '\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO',
owner = 'spark',
group = 'spark',
mode = 0644,
)
self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/metrics.properties',
content = InlineTemplate(self.getConfig()['configurations']['spark-metrics-properties']['content']),
owner = 'spark',
group = 'spark',
)
self.assertResourceCalled('Directory', '/usr/hdp/current/spark-client/logs',
owner = 'spark',
group = 'spark',
mode = 0755,
)
def assert_configure_secured(self):
self.assertResourceCalled('Directory', '/var/run/spark',
owner = 'spark',
group = 'hadoop',
recursive = True,
mode = 0775
)
self.assertResourceCalled('Directory', '/var/log/spark',
owner = 'spark',
group = 'hadoop',
recursive = True,
mode = 0775
)
self.assertResourceCalled('HdfsResource', '/user/spark',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = True,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = UnknownConfigurationMock(),
hdfs_site = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = UnknownConfigurationMock(),
owner = 'spark',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
dfs_type = '',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
mode = 0775,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = True,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = UnknownConfigurationMock(),
hdfs_site = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = UnknownConfigurationMock(),
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
dfs_type = '',
)
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/spark-client/conf/spark-defaults.conf',
owner = 'spark',
key_value_delimiter = ' ',
group = 'spark',
properties = self.getConfig()['configurations']['spark-defaults'],
)
self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/spark-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['spark-env']['content']),
owner = 'spark',
group = 'spark',
mode = 0644,
)
self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/log4j.properties',
content = '\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO',
owner = 'spark',
group = 'spark',
mode = 0644,
)
self.assertResourceCalled('File', '/usr/hdp/current/spark-client/conf/metrics.properties',
content = InlineTemplate(self.getConfig()['configurations']['spark-metrics-properties']['content']),
owner = 'spark',
group = 'spark',
)
self.assertResourceCalled('Directory', '/usr/hdp/current/spark-client/logs',
owner = 'spark',
group = 'spark',
mode = 0755,
)
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
def test_pre_upgrade_restart_23(self, copy_to_hdfs_mock):
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
copy_to_hdfs_mock.return_value = True
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/job_history_server.py",
classname = "JobHistoryServer",
command = "pre_upgrade_restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute', ('hdp-select', 'set', 'spark-historyserver', version), sudo=True)
self.assertNoMoreResources()
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'spark', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
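# Exposition sketch (stdlib only, not part of the original test): the
# assertEquals calls above unpack unittest.mock call records, where
# call_args_list[0] is the first recorded call, the next [0] selects its
# positional-args tuple, and the final [0] is the first positional argument.
# The command tuple below is illustrative.
def _sketch_call_args_indexing():
    from unittest import mock
    m = mock.Mock()
    m(('conf-select', 'set-conf-dir', '--package', 'spark'))
    assert m.call_args_list[0][0][0] == (
        'conf-select', 'set-conf-dir', '--package', 'spark')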
| 48.39521 | 653 | 0.664996 | 1,835 | 16,164 | 5.686104 | 0.159128 | 0.062105 | 0.032394 | 0.024152 | 0.844451 | 0.828445 | 0.827008 | 0.820587 | 0.820587 | 0.812728 | 0 | 0.015448 | 0.203044 | 16,164 | 333 | 654 | 48.540541 | 0.794519 | 0.001237 | 0 | 0.761745 | 0 | 0.02349 | 0.36515 | 0.247659 | 0 | 0 | 0 | 0 | 0.151007 | 0 | null | null | 0 | 0.010067 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8
e054a5745344cc75e7903418581ff63d795cec92 | 52,772 | py | Python | tensorflow/python/distribute/vars_test.py | jessecantu/tensorflow | b9c6bd0008933f640d2c8f3a372de1f75f7208da | ["Apache-2.0"] | 1 | 2021-10-02T14:03:09.000Z | 2021-10-02T14:03:09.000Z | tensorflow/python/distribute/vars_test.py | jessecantu/tensorflow | b9c6bd0008933f640d2c8f3a372de1f75f7208da | ["Apache-2.0"] | null | null | null | tensorflow/python/distribute/vars_test.py | jessecantu/tensorflow | b9c6bd0008933f640d2c8f3a372de1f75f7208da | ["Apache-2.0"] | 1 | 2021-10-03T18:47:35.000Z | 2021-10-03T18:47:35.000Z |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the distributed values library."""
import itertools
import uuid
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import checkpoint_management as ckpt_manager
from tensorflow.python.training.tracking import util as trackable_utils
_TPU_STRATEGIES = (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)
def strategy_and_run_tf_function_combinations():
# Test the combination of different strategies and whether a tf.function
# is passed into strategy.run.
return combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"],
experimental_run_tf_function=[True, False],
use_var_policy=[True, False]) + combinations.combine(
distribution=[
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["graph", "eager"],
experimental_run_tf_function=[True],
use_var_policy=[True, False])
def strategy_with_var_policy():
return combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["graph", "eager"],
use_var_policy=[True, False])
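# Exposition sketch (not part of the original file): combinations.combine()
# conceptually expands each keyword list into a Cartesian product of
# parameter dicts, and `+` concatenates two such lists. A minimal
# pure-Python analogue using only itertools, which is already imported above:
def _sketch_combine(**param_lists):
    keys = sorted(param_lists)
    return [dict(zip(keys, vals))
            for vals in itertools.product(*(param_lists[k] for k in keys))]
# e.g. _sketch_combine(mode=["graph", "eager"], use_var_policy=[True, False])
# yields four dicts, one per (mode, use_var_policy) pair.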
class OnWriteVariableSync(test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssign(self, distribution, experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1.), ("assign_add", 1.), ("assign_sub", -1.)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# Assigning in replica context with SUM does not make sense, since you
# could just multiply the value by the number of replicas; the error is
# "1. is not a distributed value and is unsupported for aggregation SUM".
if (not cross_replica and aggregation ==
variables_lib.VariableAggregation.SUM):
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
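# Exposition sketch (public tf.distribute API, single CPU replica; an
# illustrative analogue of the assign() helper above, not the test's own
# harness): a cross-replica update calls the variable method directly,
# while a replica-context update must go through strategy.run.
def _sketch_assign_contexts():
    import tensorflow as tf  # assumed available; the sketch is standalone
    strategy = tf.distribute.MirroredStrategy(["/cpu:0"])
    with strategy.scope():
        v = tf.Variable(0., aggregation=tf.VariableAggregation.MEAN)
    v.assign_add(1.)                        # cross-replica context
    strategy.run(lambda: v.assign_add(1.))  # replica context
    assert v.numpy() == 2.0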
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignOnWriteVar(self, distribution, experimental_run_tf_function):
with distribution.scope():
v_to_assign = variable_scope.variable(
2., aggregation=variables_lib.VariableAggregation.MEAN)
v_to_assign_sub = variable_scope.variable(
-2., aggregation=variables_lib.VariableAggregation.MEAN)
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", v_to_assign), ("assign_add", v_to_assign),
("assign_sub", v_to_assign_sub)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# Assigning in replica context with SUM does not make sense, since you
# could just multiply the value by the number of replicas; the error is
# "1. is not a distributed value and is unsupported for aggregation SUM".
if aggregation == variables_lib.VariableAggregation.SUM:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(2.0, self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaVal(self, distribution, experimental_run_tf_function):
if isinstance(distribution, _TPU_STRATEGIES):
self.skipTest("Assigning PerReplica values is not supported. See"
" sponge/80ba41f8-4220-4516-98ce-bbad48f9f11a.")
with distribution.scope():
per_replica_value = values.PerReplica(
[constant_op.constant(2.0),
constant_op.constant(2.0)])
per_replica_sub_value = values.PerReplica(
[constant_op.constant(-2.0),
constant_op.constant(-2.0)])
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", per_replica_value), ("assign_add", per_replica_value),
("assign_sub", per_replica_sub_value)]
# We don't support assigning PerReplica values to vars in replica context
# with aggregation=NONE.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# Assigning in replica context with SUM does not make sense, since you
# could just multiply the value by the number of replicas; the error is
# "1. is not a distributed value and is unsupported for aggregation SUM".
if cross_replica:
# We don't support assigning PerReplica values to MirroredVariables in
# cross replica context
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
if aggregation == variables_lib.VariableAggregation.SUM:
expected = 4.0
else:
expected = 2.0
for component in v._values:
self.assertAllEqual(expected, self.evaluate(component.read_value()))
@combinations.generate(strategy_with_var_policy())
def testValueInReplicaContext(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
1., aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def f():
with ops.control_dependencies([v.assign_add(1.)]):
return v.value()
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(f)))
for value in results:
self.assertEqual(2., value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(read_var_fn)))
for component, value in zip(v._values, results):
self.assertAllEqual(self.evaluate(component.read_value()), value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInCrossReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
2.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = read_var_fn()
for component in v._values:
self.assertEqual(self.evaluate(component.read_value()),
self.evaluate(results))
@combinations.generate(strategy_with_var_policy())
def testAssignOutOfScope(self, distribution):
with distribution.scope():
mirrored = variables_lib.Variable(1.)
self.evaluate(mirrored.assign(3.))
self.assertEqual(self.evaluate(mirrored.read_value()), 3.)
for component in mirrored.values:
self.assertEqual(self.evaluate(component.read_value()), 3.)
@combinations.generate(strategy_with_var_policy())
def testInitializedToSameValueInsideEagerRun(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only test")
v = [None]
@def_function.function
def step():
def f():
if v[0] is None:
v[0] = variables_lib.Variable(random_ops.random_normal([]))
distribution.run(f)
context.set_global_seed(None)
step()
vals = self.evaluate(v[0].values)
self.assertAllEqual(vals[0], vals[1])
@combinations.generate(strategy_with_var_policy())
def testAggregationOnlyFirstReplica(self, distribution):
with distribution.scope():
v = variable_scope.variable(
15.,
synchronization=variables_lib.VariableSynchronization.ON_WRITE,
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def assign():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
per_replica_results = self.evaluate(distribution.experimental_local_results(
distribution.run(assign)))
# The per-replica values should always match the first replica's value.
self.assertAllEqual(
array_ops.zeros(distribution.num_replicas_in_sync, dtypes.float32),
per_replica_results)
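# Exposition sketch (public API; requires two logical CPUs, which must be
# configured before TensorFlow initializes its devices): ONLY_FIRST_REPLICA
# resolves conflicting per-replica writes by keeping replica 0's value,
# which is why the test above expects zeros.
def _sketch_only_first_replica():
    import tensorflow as tf
    cpu = tf.config.list_physical_devices("CPU")[0]
    tf.config.set_logical_device_configuration(
        cpu, [tf.config.LogicalDeviceConfiguration()] * 2)
    strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
    with strategy.scope():
        v = tf.Variable(
            15., aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
    def assign():
        rid = tf.distribute.get_replica_context().replica_id_in_sync_group
        return v.assign(tf.cast(rid, tf.float32))
    strategy.run(assign)
    assert v.numpy() == 0.0  # replica 0 assigned 0., and its value wins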
@combinations.generate(strategy_with_var_policy())
def testInitScope(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only")
class C(object):
pass
obj = C()
obj.w = None
obj.v = None
@def_function.function
def assign():
with ops.init_scope():
if obj.w is None:
obj.w = variables_lib.Variable(
0., aggregation=variables_lib.VariableAggregation.MEAN)
obj.v = variables_lib.Variable(
obj.w.read_value(),
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
return obj.v.assign_add(2.)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
self.assertAllEqual([2., 2.], per_replica_results)
@combinations.generate(strategy_with_var_policy())
def testOperatorOverride(self, distribution):
with distribution.scope():
v = variable_scope.variable(
1, aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
self.assertEqual(2, self.evaluate(v + 1))
@def_function.function
def add():
return v + 1
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(add)))
self.assertAllEqual([2, 2], per_replica_results)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"],
use_var_policy=[True, False]))
def testSaveAndRestoreOnWrite(self, strategy):
aggregation = [
variable_scope.VariableAggregation.NONE,
variable_scope.VariableAggregation.ONLY_FIRST_REPLICA,
variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN
]
for agg in aggregation:
v_normal_restore = variables_lib.Variable(1.0)
v_normal_save = variables_lib.Variable(3.0)
with strategy.scope():
v_on_write = variables_lib.Variable(2.0, aggregation=agg)
# Save ONWRITE Restore ONWRITE
# Save
ckpt = trackable_utils.Checkpoint(var=v_on_write)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restore
ckpt.restore(manager.latest_checkpoint)
self.assertEqual(2.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(2.0, self.evaluate(v_on_write.read_value()))
# Save Mirrored Restore Normal
# We've already saved Mirrored, so we only need to restore normal
ckpt_normal = trackable_utils.Checkpoint(var=v_normal_restore)
ckpt_normal.restore(manager.latest_checkpoint)
self.assertEqual(2.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(2.0, self.evaluate(v_normal_restore.read_value()))
# Save Normal Restore Mirrored
# Save
ckpt = trackable_utils.Checkpoint(var=v_normal_save)
manager_2 = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckptckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager_2.save()
# Restore
ckpt_on_write = trackable_utils.Checkpoint(var=v_on_write)
ckpt_on_write.restore(manager_2.latest_checkpoint)
self.assertEqual(3.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(3.0, self.evaluate(v_on_write.read_value()))
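# Exposition sketch: every save/restore pair above follows the same
# tf.train.Checkpoint / CheckpointManager protocol. A minimal standalone
# round trip (the temporary directory name is arbitrary):
def _sketch_checkpoint_roundtrip():
    import tempfile
    import tensorflow as tf
    v = tf.Variable(2.0)
    ckpt = tf.train.Checkpoint(var=v)
    manager = tf.train.CheckpointManager(
        ckpt, tempfile.mkdtemp(), max_to_keep=None)
    path = manager.save()
    v.assign(99.0)      # clobber the value, then restore the saved one
    ckpt.restore(path)  # equivalently: ckpt.restore(manager.latest_checkpoint)
    assert v.numpy() == 2.0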
ms_combination = combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"])
tpu_combination = combinations.combine(
distribution=[strategy_combinations.tpu_strategy_packed_var],
mode=["graph", "eager"])
class OnWriteVariableSyncScatterTests(test.TestCase, parameterized.TestCase):
@combinations.generate(ms_combination)
def testScatterSub(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[0., 0., 0.], aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(v.initializer)
@def_function.function
def scatter_sub():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.stack([
math_ops.cast(replica_id, dtypes.float32),
math_ops.cast(replica_id + 1, dtypes.float32)
]),
indices=array_ops.stack([replica_id, replica_id + 1]),
dense_shape=(3,))
return v.scatter_sub(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_sub)))
self.assertAllEqual([[0., -1., -1.], [0., -1., -1.]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterAdd(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(v.initializer)
@def_function.function
def scatter_add():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.stack([replica_id, replica_id + 1]),
indices=array_ops.stack([replica_id, replica_id + 1]),
dense_shape=(3,))
return v.scatter_add(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_add)))
self.assertAllEqual([[0, 2, 2], [0, 2, 2]], per_replica_results)
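# Exposition sketch: scatter_add on an ordinary tf.Variable takes a
# tf.IndexedSlices of (values, indices, dense_shape); the distributed test
# above applies the same op on each replica and then aggregates with SUM.
def _sketch_scatter_add():
    import tensorflow as tf
    v = tf.Variable([0., 0., 0.])
    delta = tf.IndexedSlices(values=tf.constant([1., 2.]),
                             indices=tf.constant([0, 2]),
                             dense_shape=tf.constant([3]))
    v.scatter_add(delta)
    assert v.numpy().tolist() == [1.0, 0.0, 2.0]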
@combinations.generate(ms_combination)
def testScatterDiv(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[1, 6, 1], aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(v.initializer)
@def_function.function
def scatter_div():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.reshape(replica_id + 2, [1]),
indices=array_ops.reshape(replica_id, [1]),
dense_shape=(3,))
return v.scatter_div(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_div)))
self.assertAllEqual([[0, 2, 1], [0, 2, 1]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMul(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[2., 1., 1.], aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(v.initializer)
@def_function.function
def scatter_mul():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.reshape(
math_ops.cast(replica_id + 2, dtypes.float32), [1]),
indices=array_ops.reshape(replica_id, [1]),
dense_shape=(3,))
return v.scatter_mul(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_mul)))
self.assertAllClose([[2., 1.5, 1.], [2., 1.5, 1.]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMin(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 2, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 2, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_min(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([1]),
indices=array_ops.identity([1]),
dense_shape=(3,))
return v.scatter_min(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_min.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_min, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_min, args=(v2,))))
self.assertAllClose([[0, 1, 0], [0, 1, 0]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMax(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 0, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_max(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([1]),
indices=array_ops.identity([0]),
dense_shape=(3,))
return v.scatter_max(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_max.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_max, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_max, args=(v2,))))
self.assertAllClose([[1, 0, 0], [1, 0, 0]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterUpdate(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 0, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_update(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([3]),
indices=array_ops.identity([1]),
dense_shape=(3,))
return v.scatter_update(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_update.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_update, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_update, args=(v2,))))
self.assertAllClose([[0, 3, 0], [0, 3, 0]], per_replica_results)
@combinations.generate(ms_combination + tpu_combination)
def testScatterOpsWithNoneAggregation(self, distribution):
def assert_close(v, op, delta, expect):
scatter_op = getattr(v, op)
@def_function.function
def scatter_xxx():
return scatter_op(delta)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_xxx)))
self.assertAllClose([expect, expect], per_replica_results)
with distribution.scope():
v = variables_lib.Variable(
[4.], aggregation=variables_lib.VariableAggregation.NONE)
self.evaluate(variables_lib.global_variables_initializer())
delta = indexed_slices.IndexedSlices(
values=array_ops.identity([2.]),
indices=array_ops.identity([0]),
dense_shape=(1,))
assert_close(v, "scatter_sub", delta, [2.])
assert_close(v, "scatter_add", delta, [4.])
assert_close(v, "scatter_max", delta, [4.])
assert_close(v, "scatter_min", delta, [2.])
assert_close(v, "scatter_mul", delta, [4.])
assert_close(v, "scatter_div", delta, [2.])
assert_close(v, "scatter_update", delta, [2.])
@combinations.generate(ms_combination + tpu_combination)
def testScatterOpsInCrossReplicaContext(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[1, 1, 1], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable([1, 1, 1])
self.evaluate(variables_lib.global_variables_initializer())
value = indexed_slices.IndexedSlices(
values=array_ops.identity([2]),
indices=array_ops.identity([0]),
dense_shape=(3,))
with distribution.scope():
self.evaluate(v1.scatter_add(value))
self.assertAllEqual([3, 1, 1], self.evaluate(v1.read_value()))
self.evaluate(v2.scatter_min(value))
self.assertAllEqual([1, 1, 1], self.evaluate(v2.read_value()))
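# Exposition sketch for the SyncOnRead tests that follow (public API,
# single replica, so aggregation is trivial here): an ON_READ variable
# keeps one copy per replica, is written in replica context, and only
# aggregates (here with MEAN) when read back in cross-replica context.
def _sketch_on_read_variable():
    import tensorflow as tf
    strategy = tf.distribute.MirroredStrategy(["/cpu:0"])
    with strategy.scope():
        v = tf.Variable(0.,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.MEAN)
    strategy.run(lambda: v.assign(3.))    # per-replica write
    assert v.read_value().numpy() == 3.0  # aggregated read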
class OnReadVariableSyncTest(test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssign(self, distribution, experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1.), ("assign_add", 1.), ("assign_sub", -1.)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# VariableAggregation.SUM in cross-replica mode is tested below,
# VariableAggregation.NONE in cross-replica mode is not supported.
if cross_replica and aggregation in [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.NONE,
]:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignOnReadVar(self, distribution, experimental_run_tf_function):
with distribution.scope():
v_to_assign = variable_scope.variable(
2., aggregation=variables_lib.VariableAggregation.MEAN)
v_to_assign_sub = variable_scope.variable(
-2., aggregation=variables_lib.VariableAggregation.MEAN)
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", v_to_assign), ("assign_add", v_to_assign),
("assign_sub", v_to_assign_sub)]
expected_cross_replica = {
variables_lib.VariableAggregation.SUM: 1.0,
variables_lib.VariableAggregation.MEAN: 2.0,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA: 2.0
}
expected_replica = {
variables_lib.VariableAggregation.SUM: 2.0,
variables_lib.VariableAggregation.MEAN: 2.0,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA: 2.0
}
# aggregation=NONE is not supported for OnReadVariables.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# Assigning in replica context with SUM does not make sense, since you
# could just multiply the value by the number of replicas; the error is
# "1. is not a distributed value and is unsupported for aggregation SUM".
if aggregation == variables_lib.VariableAggregation.SUM:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
if cross_replica:
for component in v._values:
self.assertAllEqual(expected_cross_replica.get(aggregation),
self.evaluate(component.read_value()))
else:
for component in v._values:
self.assertAllEqual(expected_replica.get(aggregation),
self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaVal(self, distribution, experimental_run_tf_function):
if isinstance(distribution, _TPU_STRATEGIES):
self.skipTest("Assigning PerReplica values is not supported. See"
" sponge/80ba41f8-4220-4516-98ce-bbad48f9f11a.")
self.skipTest("We don't support assiging PerReplica values in cross "
"replica context or replica context. see error in "
"sponge/2b2e54c1-eda6-4534-82e1-c73b1dcd517f.")
with distribution.scope():
per_replica_value = values.PerReplica(
[constant_op.constant(2.0),
constant_op.constant(2.0)])
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", per_replica_value)]
# We don't support assigning PerReplica values to vars in replica context
# with aggregation=NONE.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# Assigning in replica context with SUM does not make sense, since you
# could just multiply the value by the number of replicas; the error is
# "1. is not a distributed value and is unsupported for aggregation SUM".
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
# with self.assertRaisesRegex(ValueError, "Attempt to convert a value "):
self.evaluate(assign(fn, v, update_value, cross_replica))
if aggregation == variables_lib.VariableAggregation.SUM:
expected = 4.0
else:
expected = 2.0
for component in v._values:
self.assertAllEqual(expected, self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignDtypeConversion(self, distribution,
experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1), ("assign_add", 1), ("assign_sub", -1)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# VariableAggregation.SUM in cross-replica mode is tested below,
# VariableAggregation.NONE in cross-replica mode is not supported.
if cross_replica and aggregation in [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.NONE,
]:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_with_var_policy())
def testAssignWithAggregationSum(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(v.assign(1. * distribution.num_replicas_in_sync))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
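# Worked numbers for the test above (exposition only): a cross-replica
# assign to an ON_READ/SUM variable spreads the value evenly over the
# replicas, so v.assign(1. * n) leaves 1. on each component and a read
# SUMs back to n.
def _sketch_on_read_sum_assign(num_replicas=2):
    assigned = 1.0 * num_replicas
    per_component = assigned / num_replicas  # what each replica stores
    assert per_component == 1.0
    assert per_component * num_replicas == assigned  # recovered on read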
@combinations.generate(strategy_with_var_policy())
def testAssignAddSubWithAggregationSum(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
with self.assertRaisesRegex(
ValueError, "SyncOnReadVariable does not support "):
self.evaluate(v.assign_add(1.))
with self.assertRaisesRegex(
ValueError, "SyncOnReadVariable does not support "):
self.evaluate(v.assign_sub(1.))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(read_var_fn)))
for component, value in zip(v._values, results):
self.assertAllEqual(self.evaluate(component.read_value()), value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInCrossReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
if isinstance(distribution, _TPU_STRATEGIES):
resolver = tpu_cluster_resolver.TPUClusterResolver("")
tpu_strategy_util.initialize_tpu_system(resolver)
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
def assign(v=v):
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
if experimental_run_tf_function:
assign = def_function.function(assign)
self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
num_replicas = distribution.num_replicas_in_sync
sum_of_replica_values = num_replicas * (num_replicas - 1) / 2.
if aggregation == variables_lib.VariableAggregation.SUM:
expected = sum_of_replica_values
elif aggregation == variables_lib.VariableAggregation.MEAN:
expected = sum_of_replica_values / num_replicas
else:
expected = 0
self.assertEqual(expected, self.evaluate(v.read_value()), aggregation)
self.assertEqual(expected, self.evaluate(v.value()), aggregation)
self.assertEqual(expected, self.evaluate(v), aggregation)
self.assertEqual(expected, self.evaluate(array_ops.identity(v)),
aggregation)
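# Worked numbers (exposition only): the replica ids are 0..n-1, so their
# sum is n*(n-1)/2; with n == 2 the expectations computed above are 1.0
# for SUM and 0.5 for MEAN.
def _sketch_replica_id_sum(n=2):
    s = n * (n - 1) / 2.0
    assert (s, s / n) == (1.0, 0.5)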
# TODO(b/145574622): Re-enable this test once ReduceOp argument is
# respected on GPUs.
@combinations.generate(strategy_and_run_tf_function_combinations())
def disable_testAllReduce(self, distribution,
experimental_run_tf_function):
with distribution.scope():
v = variable_scope.variable(
2.,
synchronization=variables_lib.VariableSynchronization.ON_WRITE,
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
def all_reduce():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return ctx.all_reduce("SUM", v) + math_ops.cast(replica_id,
dtypes.float32)
if experimental_run_tf_function:
all_reduce = def_function.function(all_reduce)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(all_reduce)))
expected_result = []
for i in range(distribution.num_replicas_in_sync):
expected_result.append(2.0 * distribution.num_replicas_in_sync +
1.0 * i)
self.assertEqual(per_replica_results, tuple(expected_result))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaBeforeRead(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
def assign(var=v):
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return var.assign(math_ops.cast(replica_id, dtypes.float32))
if experimental_run_tf_function:
assign = def_function.function(assign)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
expected_result = []
for i in range(distribution.num_replicas_in_sync):
expected_result.append(1.0 * i)
self.assertEqual(per_replica_results, tuple(expected_result))
@combinations.generate(strategy_with_var_policy())
def testReadValueWithAggregationNoneInCrossReplicaContext(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.NONE)
self.evaluate(variables_lib.global_variables_initializer())
with self.assertRaisesRegex(
ValueError, "Could not convert from .* VariableAggregation\\.NONE"):
self.evaluate(v.read_value())
@combinations.generate(strategy_with_var_policy())
def testInitializedToSameValueInsideEagerRun(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only")
v = [None]
@def_function.function
def step():
def f():
if v[0] is None:
v[0] = variables_lib.Variable(
random_ops.random_normal([]),
synchronization=variables_lib.VariableSynchronization.ON_READ)
distribution.run(f)
context.set_global_seed(None)
step()
vals = self.evaluate(v[0].values)
self.assertAllEqual(vals[0], vals[1])
@combinations.generate(strategy_with_var_policy())
def testOperatorOverride(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.0,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def assign():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
# Assign different replicas with different values.
self.evaluate(distribution.experimental_local_results(
distribution.run(assign)))
self.assertEqual(1.5, self.evaluate(v + 1))
@def_function.function
def add():
return v + 1
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(add)))
self.assertAllEqual([1, 2], per_replica_results)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"],
use_var_policy=[True, False]))
def testSaveAndRestoreOnRead(self, strategy):
aggregation = [variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN]
for agg in aggregation:
v_normal_restore = variables_lib.Variable(1.0)
v_normal_save = variables_lib.Variable(2.0)
with strategy.scope():
v_on_read = variables_lib.Variable(
1.0, synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=agg)
@def_function.function
def assign_fn():
cluster_resolver = strategy.cluster_resolver
replica_ctx = ds_context.get_replica_context()
if ((cluster_resolver and cluster_resolver.task_type == "worker") or
math_ops.equal(replica_ctx.replica_id_in_sync_group,
constant_op.constant(1))):
v_on_read.assign(3.) # pylint:disable=cell-var-from-loop
else:
v_on_read.assign(4.) # pylint:disable=cell-var-from-loop
strategy.run(assign_fn)
# Save ONREAD, restore ONREAD
# Saves v[0] + v[1] = 7 for SUM and 3.5 for MEAN.
ckpt = trackable_utils.Checkpoint(var=v_on_read)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restores a value of 7/2 = 3.5 for SUM and 3.5 for MEAN.
ckpt.restore(manager.latest_checkpoint)
self.assertEqual(3.5, self.evaluate(v_on_read._values[0]))
# Save ONREAD, restore normal
ckpt_normal = trackable_utils.Checkpoint(var=v_normal_restore)
ckpt_normal.restore(manager.latest_checkpoint)
if agg == variable_scope.VariableAggregation.SUM:
self.assertEqual(7.0, self.evaluate(v_normal_restore.read_value()))
else:
self.assertEqual(3.5, self.evaluate(v_normal_restore.read_value()))
# Save normal, restore ONREAD
ckpt = trackable_utils.Checkpoint(var=v_normal_save)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restores a value of 2/2 = 1.0 for SUM and 2.0 for MEAN.
ckpt_on_read = trackable_utils.Checkpoint(var=v_on_read)
ckpt_on_read.restore(manager.latest_checkpoint)
if agg == variable_scope.VariableAggregation.SUM:
self.assertEqual(1.0, self.evaluate(v_on_read._values[0]))
else:
self.assertEqual(2.0, self.evaluate(v_on_read._values[0]))
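# Worked numbers for the ON_READ round trips above (exposition only): with
# replicas assigned 3. and 4., SUM checkpoints 3. + 4. = 7. and a restore
# splits it back as 7 / 2 = 3.5 per component; MEAN checkpoints 3.5
# directly, so both aggregations read back 3.5 after restore.
def _sketch_on_read_ckpt_numbers():
    replica_vals = [3.0, 4.0]
    saved = sum(replica_vals)             # value written under SUM
    restored = saved / len(replica_vals)  # per-component value restored
    assert restored == 3.5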
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
aggregation=[
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
],
mode=["graph", "eager"],
use_var_policy=[True, False]))
class SyncOnReadScatterReplicaTest(test.TestCase, parameterized.TestCase):
def testScatterSub(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[1., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[0.], [1.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_sub, args=(delta,)))
def testScatterAdd(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[1., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[0.], [1.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_add, args=(delta,)))
def testScatterDiv(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[2., 6., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[2.], [2.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[3.], [3.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_div, args=(delta,)))
def testScatterMul(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[2., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[2.], [3.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[4.], [5.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_mul, args=(delta,)))
def testScatterMin(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[3., 4., 5.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [8.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[9.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_min, args=(delta,)))
def testScatterMax(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[3., 4., 5.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [8.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[9.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_max, args=(delta,)))
def testScatterUpdate(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[0., 0., 0.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[3.], [4.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_update, args=(delta,)))
if __name__ == "__main__":
test_util.main()
| 40.130798 | 84 | 0.688206 | 5,878 | 52,772 | 5.935182 | 0.063287 | 0.057787 | 0.079084 | 0.040932 | 0.870926 | 0.839052 | 0.802993 | 0.783415 | 0.760255 | 0.730645 | 0 | 0.012502 | 0.213333 | 52,772 | 1,314 | 85 | 40.161339 | 0.827865 | 0.057587 | 0 | 0.719816 | 0 | 0 | 0.018282 | 0.003181 | 0 | 0 | 0 | 0.000761 | 0.065438 | 1 | 0.068203 | false | 0.000922 | 0.023041 | 0.004608 | 0.126267 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7
e0791bd317c669f8efc4e286c195114e56c5cdce | 150 | py | Python | opennmt/optimizers/__init__.py | abumafrim/OpenNMT-tf | f14c05a7cb8b1b8f3a692d6fea3c12067bc3eb2c | ["MIT"] | 1 | 2020-10-15T11:13:45.000Z | 2020-10-15T11:13:45.000Z | opennmt/optimizers/__init__.py | abumafrim/OpenNMT-tf | f14c05a7cb8b1b8f3a692d6fea3c12067bc3eb2c | ["MIT"] | null | null | null | opennmt/optimizers/__init__.py | abumafrim/OpenNMT-tf | f14c05a7cb8b1b8f3a692d6fea3c12067bc3eb2c | ["MIT"] | 1 | 2021-04-14T14:12:24.000Z | 2021-04-14T14:12:24.000Z |
"""Module defining custom optimizers."""
from opennmt.optimizers.utils import make_optimizer
from opennmt.optimizers.utils import register_optimizer
| 30 | 55 | 0.84 | 18 | 150 | 6.888889 | 0.611111 | 0.177419 | 0.33871 | 0.419355 | 0.516129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086667 | 150 | 4 | 56 | 37.5 | 0.905109 | 0.226667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7
0edcd3e86ee7e6552759bab47d5c32c0d5d9fac9 | 178 | py | Python | core/ai/behaviors/__init__.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | 3 | 2017-10-28T11:28:38.000Z | 2018-09-12T09:47:00.000Z | core/ai/behaviors/__init__.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | null | null | null | core/ai/behaviors/__init__.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | null | null | null |
from core.ai.behaviors.base import Behavior
from core.ai.behaviors.meleeattack import MeleeAttack
from core.ai.behaviors.move import Move
from core.ai.behaviors.wait import Wait
| 35.6 | 53 | 0.842697 | 28 | 178 | 5.357143 | 0.357143 | 0.213333 | 0.266667 | 0.506667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.089888 | 178 | 4 | 54 | 44.5 | 0.925926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7
0ee5be5dc35e9b2d7772eb891f62eae8572b1a6e | 10,335 | py | Python | unittests/tools/test_acunetix_parser.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | ["BSD-3-Clause"] | 249 | 2016-09-06T21:04:40.000Z | 2018-01-19T15:59:44.000Z | unittests/tools/test_acunetix_parser.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | ["BSD-3-Clause"] | 255 | 2016-09-06T21:36:37.000Z | 2018-01-19T19:57:57.000Z | unittests/tools/test_acunetix_parser.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | ["BSD-3-Clause"] | 152 | 2016-09-06T21:04:54.000Z | 2018-01-18T08:52:24.000Z |
import datetime
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.acunetix.parser import AcunetixParser
class TestAcunetixParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
testfile = open("unittests/scans/acunetix/one_finding.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(1, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(352, finding.cwe)
self.assertEqual(datetime.date(2018, 9, 24), finding.date)
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertFalse(finding.false_p)
self.assertEqual("Vijay Test Imapact", finding.impact)
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
self.assertEqual(1, len(finding.unsaved_endpoints))
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual('https', endpoint.protocol)
self.assertEqual(443, endpoint.port)
self.assertEqual('vijaytest.com', endpoint.host)
self.assertEqual('some/path', endpoint.path)
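# Hypothetical helper (not in the original file): the req/resp assertions
# in the tests below repeat verbatim in every subtest, and could be folded
# into a single method on this class along these lines:
def _assert_req_resp_well_formed(self, finding, expected_count=1):
    self.assertEqual(expected_count, len(finding.unsaved_req_resp))
    for req_resp in finding.unsaved_req_resp:
        self.assertIn('req', req_resp)
        self.assertIsNotNone(req_resp['req'])
        self.assertIsInstance(req_resp['req'], str)
        self.assertIn('resp', req_resp)
        self.assertIsNotNone(req_resp['resp'])
        self.assertIsInstance(req_resp['resp'], str)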
def test_parse_file_with_multiple_finding(self):
testfile = open("unittests/scans/acunetix/many_findings.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(4, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("A single machine can take down another machine's web server with minimal bandwidth and side effects on unrelated services and ports.", finding.impact)
# check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=1):
finding = findings[1]
self.assertEqual("Possible virtual host found", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(200, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible sensitive information disclosure.", finding.impact)
# check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=2):
finding = findings[2]
self.assertEqual("Unencrypted connection (verified)", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(310, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible information disclosure.", finding.impact)
# check that this finding has no references
self.assertIsNone(finding.references)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
def test_parse_file_with_example_com(self):
testfile = open("unittests/scans/acunetix/XML_http_example_co_id_.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(7, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("HTML form without CSRF protection", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:L/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertIn("An attacker could use CSRF to trick a victim into accessing a website hosted by the attacker,", finding.impact)
# aggregated
self.assertEqual(3, finding.nb_occurences)
# check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(3, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('h/search', endpoint.path)
endpoint = finding.unsaved_endpoints[1]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('m/zmain', endpoint.path)
# check req/resp
self.assertEqual(3, len(finding.unsaved_req_resp))
for req_resp in finding.unsaved_req_resp:
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=6):
finding = findings[6]
self.assertEqual("Content Security Policy (CSP) not implemented", finding.title)
self.assertEqual("Info", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertFalse(finding.false_p)
self.assertIn("CSP can be used to prevent and/or mitigate attacks that involve content/code injection,", finding.impact)
# check that this finding has references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
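The req/resp assertions above repeat verbatim for every finding. A small helper method on the test class could keep them in one place; this is a hypothetical sketch, not part of the original suite, and the helper name is invented:

# Hypothetical helper for the repeated req/resp shape checks.
def assert_req_resp_shape(self, finding, expected_count):
    self.assertEqual(expected_count, len(finding.unsaved_req_resp))
    for req_resp in finding.unsaved_req_resp:
        for key in ('req', 'resp'):
            self.assertIn(key, req_resp)
            self.assertIsNotNone(req_resp[key])
            self.assertIsInstance(req_resp[key], str)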
| 50.661765
| 180
| 0.630963
| 1,149
| 10,335
| 5.583116
| 0.154917
| 0.128605
| 0.060951
| 0.03258
| 0.819174
| 0.807638
| 0.792985
| 0.744661
| 0.744661
| 0.70491
| 0
| 0.014563
| 0.262506
| 10,335
| 203
| 181
| 50.91133
| 0.82708
| 0.037155
| 0
| 0.706215
| 0
| 0.028249
| 0.112755
| 0.031209
| 0
| 0
| 0
| 0
| 0.717514
| 1
| 0.016949
| false
| 0
| 0.022599
| 0
| 0.045198
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0eff1eb6388bdf042052833b36087da1f0c83ad4
| 32,102
|
py
|
Python
|
Exam_Exercises/Sase.py
|
Konstantin-Bogdanoski/VI
|
fbf934504b00271e1a8d405a2995fc7ed662f37a
|
[
"MIT"
] | 2
|
2020-03-27T20:36:27.000Z
|
2020-09-20T13:34:46.000Z
|
Exam_Exercises/Sase.py
|
Konstantin-Bogdanoski/VI
|
fbf934504b00271e1a8d405a2995fc7ed662f37a
|
[
"MIT"
] | null | null | null |
Exam_Exercises/Sase.py
|
Konstantin-Bogdanoski/VI
|
fbf934504b00271e1a8d405a2995fc7ed662f37a
|
[
"MIT"
] | null | null | null |
from Python_neinformirano_prebaruvanje_final import *
# Check if the white Rook is in a valid position (not in the line of fire of any black Rook)
def validityOfWhite(gun, A):
#print("TESTING WHITE VALIDITY")
# print(gun)
location = gun
#print(location[0] != A[4][0] and location[0] != A[5][0] and location[0] != A[6][0] and location[0] != A[7][0] and
# location[1] != A[4][1] and location[1] != A[5][1] and location[1] != A[6][1] and location[1] != A[7][1] and
# location[0] < 6 and location[0] > 0 and location[1] > 0 and location[1] < 8)
return (location[0] != A[4][0] and location[0] != A[5][0] and location[0] != A[6][0] and location[0] != A[7][0] and
location[1] != A[4][1] and location[1] != A[5][1] and location[1] != A[6][1] and location[1] != A[7][1] and
location[0] < 6 and location[0] > 0 and location[1] > 0 and location[1] < 8)
# Check if the black Rook is in a valid position (not in the line of fire of any white Rook)
def validityOfBlack(gun, A):
#print("TESTING BLACK VALIDITY")
# print(gun)
location = gun
#print(location[0] != A[0][0] and location[0] != A[1][0] and location[0] != A[2][0] and location[0] != A[3][0] and
# location[1] != A[0][1] and location[1] != A[1][1] and location[1] != A[2][1] and location[1] != A[3][1] and
# location[0] < 6 and location[0] > 0 and location[1] > 0 and location[1] < 8)
return (location[0] != A[0][0] and location[0] != A[1][0] and location[0] != A[2][0] and location[0] != A[3][0] and
location[1] != A[0][1] and location[1] != A[1][1] and location[1] != A[2][1] and location[1] != A[3][1] and
location[0] < 6 and location[0] > 0 and location[1] > 0 and location[1] < 8)
# Check that no two white rooks share a row or a column
def validityWhiteOnWhite(A):
#print("TESTING WHITEonWHITE")
#print(A[0][0] != A[1][0] and A[0][0] != A[2][0] and A[0][0] != A[3][0] and
# A[1][0] != A[2][0] and A[1][0] != A[3][0] and
# A[2][0] != A[3][0] and
# A[0][1] != A[1][1] and A[0][1] != A[2][1] and A[0][1] != A[3][1] and
# A[1][1] != A[2][1] and A[1][1] != A[3][1] and
# A[2][1] != A[3][1])
return (A[0][0] != A[1][0] and A[0][0] != A[2][0] and A[0][0] != A[3][0] and
A[1][0] != A[2][0] and A[1][0] != A[3][0] and
A[2][0] != A[3][0] and
A[0][1] != A[1][1] and A[0][1] != A[2][1] and A[0][1] != A[3][1] and
A[1][1] != A[2][1] and A[1][1] != A[3][1] and
A[2][1] != A[3][1])
# Check that no two black rooks share a row or a column
def validityBlackOnBlack(A):
#print("TESTING BLACKonBLACK")
#print(A[4][0] != A[5][0] and A[4][0] != A[6][0] and A[4][0] != A[7][0] and
# A[5][0] != A[6][0] and A[5][0] != A[7][0] and
# A[6][0] != A[7][0] and
# A[4][1] != A[5][1] and A[4][1] != A[6][1] and A[4][1] != A[7][1] and
# A[5][1] != A[6][1] and A[5][1] != A[7][1] and
# A[6][1] != A[7][1])
return (A[4][0] != A[5][0] and A[4][0] != A[6][0] and A[4][0] != A[7][0] and
A[5][0] != A[6][0] and A[5][0] != A[7][0] and
A[6][0] != A[7][0] and
A[4][1] != A[5][1] and A[4][1] != A[6][1] and A[4][1] != A[7][1] and
A[5][1] != A[6][1] and A[5][1] != A[7][1] and
A[6][1] != A[7][1])
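The four predicates above spell out every pairwise comparison by hand. An equivalent, more compact form iterates over index ranges; this is a hypothetical rewrite with the same semantics under the same state layout (indices 0-3 white, 4-7 black), not part of the original file:

from itertools import combinations

# A rook at `gun` is valid if it shares no row/column with any enemy rook
# and stays inside the 1..5 x 1..7 playing area used above.
def validity_of(gun, A, enemy_indices):
    return (all(gun[0] != A[i][0] and gun[1] != A[i][1] for i in enemy_indices)
            and 0 < gun[0] < 6 and 0 < gun[1] < 8)

# No two same-colour rooks may share a row or a column.
def no_shared_lines(A, indices):
    return all(A[i][0] != A[j][0] and A[i][1] != A[j][1]
               for i, j in combinations(indices, 2))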
class Rooks(Problem):
def __init__(self, initial):
self.initial = initial
def goal_test(self, state):
return ((state[0] == (5,8) or state[0] == (5,7) or state[0] == (5,6) or state[0] == (5,5)) and
(state[1] == (5, 8) or state[1] == (5, 7) or state[1] == (5, 6) or state[1] == (5, 5)) and
(state[2] == (5, 8) or state[2] == (5, 7) or state[2] == (5, 6) or state[2] == (5, 5)) and
(state[3] == (5, 8) or state[3] == (5, 7) or state[3] == (5, 6) or state[3] == (5, 5)) and
(state[4] == (1, 1) or state[4] == (1, 2) or state[4] == (1, 3) or state[4] == (1, 4)) and
(state[5] == (1, 1) or state[5] == (1, 2) or state[5] == (1, 3) or state[5] == (1, 4)) and
(state[6] == (1, 1) or state[6] == (1, 2) or state[6] == (1, 3) or state[6] == (1, 4)) and
(state[7] == (1, 1) or state[7] == (1, 2) or state[7] == (1, 3) or state[7] == (1, 4)))
def actions(self, state):
return self.successor(state).keys()
def result(self, state, action):
possible = self.successor(state)
return possible[action]
def successor(self, state):
successors = dict()
WhiteRook1 = state[0]
WhiteRook2 = state[1]
WhiteRook3 = state[2]
WhiteRook4 = state[3]
BlackRook1 = state[4]
BlackRook2 = state[5]
BlackRook3 = state[6]
BlackRook4 = state[7]
#
#
#
# WHITE ROOK 1
#
#
#
# WhiteRook1 UP
newWhiteRook1 = WhiteRook1
moves=0
tempState = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while(not(validityOfWhite(newWhiteRook1, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook1 = (newWhiteRook1[0], newWhiteRook1[1] - 1)
moves+=1
tempState = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook1 = (newWhiteRook1[0], newWhiteRook1[1] + 1)
stateNew = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook1 - UP: ' + str(moves)] = stateNew
# WhiteRook1 DOWN
newWhiteRook1 = WhiteRook1
moves = 0
tempState = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook1, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook1 = (newWhiteRook1[0], newWhiteRook1[1] + 1)
moves += 1
tempState = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook1 = (newWhiteRook1[0], newWhiteRook1[1] - 1)
stateNew = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook1 - DOWN: ' + str(moves)] = stateNew
# WhiteRook1 LEFT
newWhiteRook1 = WhiteRook1
moves = 0
tempState = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook1, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook1 = (newWhiteRook1[0] - 1, newWhiteRook1[1])
moves += 1
tempState = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook1 = (newWhiteRook1[0] + 1, newWhiteRook1[1])
stateNew = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook1 - LEFT: ' + str(moves)] = stateNew
# WhiteRook1 RIGHT
newWhiteRook1 = WhiteRook1
moves = 0
tempState = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook1, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook1 = (newWhiteRook1[0] + 1, newWhiteRook1[1])
moves += 1
tempState = (
newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook1 = (newWhiteRook1[0] - 1, newWhiteRook1[1])
stateNew = (newWhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook1 - RIGHT: ' + str(moves)] = stateNew
#
#
#
#
# WHITE ROOK 2
#
#
#
# WhiteRook2 UP
newWhiteRook2 = WhiteRook2
moves = 0
tempState = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook2, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook2 = (newWhiteRook2[0], newWhiteRook2[1] - 1)
moves += 1
tempState = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook2 = (newWhiteRook2[0], newWhiteRook2[1] + 1)
stateNew = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook2 - UP: ' + str(moves)] = stateNew
# WhiteRook2 DOWN
newWhiteRook2 = WhiteRook2
moves = 0
tempState = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook2, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook2 = (newWhiteRook2[0], newWhiteRook2[1] + 1)
moves += 1
tempState = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook2 = (newWhiteRook2[0], newWhiteRook2[1] - 1)
stateNew = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook2 - DOWN: ' + str(moves)] = stateNew
# WhiteRook2 LEFT
newWhiteRook2 = WhiteRook2
moves = 0
tempState = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook2, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook2 = (newWhiteRook2[0] - 1, newWhiteRook2[1])
moves += 1
tempState = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook2 = (newWhiteRook2[0] + 1, newWhiteRook2[1])
stateNew = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook2 - LEFT: ' + str(moves)] = stateNew
# WhiteRook2 RIGHT
newWhiteRook2 = WhiteRook2
moves = 0
tempState = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook2, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook2 = (newWhiteRook2[0] + 1, newWhiteRook2[1])
moves += 1
tempState = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook2 = (newWhiteRook2[0] - 1, newWhiteRook2[1])
stateNew = (WhiteRook1, newWhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook2 - RIGHT: ' + str(moves)] = stateNew
#
#
#
#
# WHITE ROOK 3
#
#
#
# WhiteRook3 UP
newWhiteRook3 = WhiteRook3
moves = 0
tempState = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook3, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(tempState)):
newWhiteRook3 = (newWhiteRook3[0], newWhiteRook3[1] - 1)
moves += 1
tempState = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook3 = (newWhiteRook3[0], newWhiteRook3[1] + 1)
stateNew = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook3 - UP: ' + str(moves)] = stateNew
# WhiteRook3 DOWN
newWhiteRook3 = WhiteRook3
moves = 0
tempState = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook3, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newWhiteRook3 = (newWhiteRook3[0], newWhiteRook3[1] + 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook3 = (newWhiteRook3[0], newWhiteRook3[1] - 1)
stateNew = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook3 - DOWN: ' + str(moves)] = stateNew
# WhiteRook3 LEFT
newWhiteRook3 = WhiteRook3
moves = 0
tempState = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook3, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newWhiteRook3 = (newWhiteRook3[0] - 1, newWhiteRook3[1])
moves += 1
tempState = (
WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook3 = (newWhiteRook3[0] + 1, newWhiteRook3[1])
stateNew = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook3 - LEFT: ' + str(moves)] = stateNew
# WhiteRook3 RIGHT
newWhiteRook3 = WhiteRook3
moves = 0
tempState = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook3, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newWhiteRook3 = (newWhiteRook3[0] + 1, newWhiteRook3[1])
moves += 1
tempState = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook3 = (newWhiteRook3[0] - 1, newWhiteRook3[1])
stateNew = (WhiteRook1, WhiteRook2, newWhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook3 - RIGHT: ' + str(moves)] = stateNew
#
#
#
#
# WHITE ROOK 4
#
#
#
# WhiteRook4 UP
newWhiteRook4 = WhiteRook4
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook4, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newWhiteRook4 = (newWhiteRook4[0], newWhiteRook4[1] - 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook4 = (newWhiteRook4[0], newWhiteRook4[1] + 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook4 - UP: ' + str(moves)] = stateNew
# WhiteRook4 DOWN
newWhiteRook4 = WhiteRook4
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook4, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newWhiteRook4 = (newWhiteRook4[0], newWhiteRook4[1] + 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook4 = (newWhiteRook4[0], newWhiteRook4[1] - 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook4 - DOWN: ' + str(moves)] = stateNew
# WhiteRook4 LEFT
newWhiteRook4 = WhiteRook4
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook4, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newWhiteRook4 = (newWhiteRook4[0] - 1, newWhiteRook4[1])
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook4 = (newWhiteRook4[0] + 1, newWhiteRook4[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook4 - LEFT: ' + str(moves)] = stateNew
# WhiteRook4 RIGHT
newWhiteRook4 = WhiteRook4
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfWhite(newWhiteRook4, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newWhiteRook4 = (newWhiteRook4[0] + 1, newWhiteRook4[1])
moves += 1
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
newWhiteRook4 = (newWhiteRook4[0] - 1, newWhiteRook4[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, newWhiteRook4, BlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['WhiteRook4 - RIGHT: ' + str(moves)] = stateNew
#
#
#
# BLACK ROOK 1
#
#
#
# BlackRook1 UP
newBlackRook1 = BlackRook1
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook1, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook1 = (newBlackRook1[0], newBlackRook1[1] - 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
newBlackRook1 = (newBlackRook1[0], newBlackRook1[1] + 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['BlackRook1 - UP: ' + str(moves)] = stateNew
# BlackRook1 DOWN
newBlackRook1 = BlackRook1
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook1, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook1 = (newBlackRook1[0], newBlackRook1[1] + 1)
moves += 1
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
newBlackRook1 = (newBlackRook1[0], newBlackRook1[1] - 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['BlackRook1 - DOWN: ' + str(moves)] = stateNew
# BlackRook1 LEFT
newBlackRook1 = BlackRook1
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook1, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook1 = (newBlackRook1[0] - 1, newBlackRook1[1])
moves += 1
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
newBlackRook1 = (newBlackRook1[0] + 1, newBlackRook1[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['BlackRook1 - LEFT: ' + str(moves)] = stateNew
# BlackRook1 RIGHT
newBlackRook1 = BlackRook1
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook1, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook1 = (newBlackRook1[0] + 1, newBlackRook1[1])
moves += 1
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
newBlackRook1 = (newBlackRook1[0] - 1, newBlackRook1[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, newBlackRook1, BlackRook2, BlackRook3, BlackRook4)
successors['BlackRook1 - RIGHT: ' + str(moves)] = stateNew
#
#
#
#
# BLACK ROOK 2
#
#
#
# BlackRook2 UP
newBlackRook2 = BlackRook2
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook2, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook2 = (newBlackRook2[0], newBlackRook2[1] - 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
newBlackRook2 = (newBlackRook2[0], newBlackRook2[1] + 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
successors['BlackRook2 - UP: ' + str(moves)] = stateNew
# BlackRook2 DOWN
newBlackRook2 = BlackRook2
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook2, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook2 = (newBlackRook2[0], newBlackRook2[1] + 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
newBlackRook2 = (newBlackRook2[0], newBlackRook2[1] - 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
successors['BlackRook2 - DOWN: ' + str(moves)] = stateNew
# BlackRook2 LEFT
newBlackRook2 = BlackRook2
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook2, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook2 = (newBlackRook2[0] - 1, newBlackRook2[1])
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
newBlackRook2 = (newBlackRook2[0] + 1, newBlackRook2[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
successors['BlackRook2 - LEFT: ' + str(moves)] = stateNew
# BlackRook2 RIGHT
newBlackRook2 = BlackRook2
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook2, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook2 = (newBlackRook2[0] + 1, newBlackRook2[1])
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
newBlackRook2 = (newBlackRook2[0] - 1, newBlackRook2[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, newBlackRook2, BlackRook3, BlackRook4)
successors['BlackRook2 - RIGHT: ' + str(moves)] = stateNew
#
#
#
#
# BLACK ROOK 3
#
#
#
# BlackRook3 UP
newBlackRook3 = BlackRook3
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook3, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook3 = (newBlackRook3[0], newBlackRook3[1] - 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
newBlackRook3 = (newBlackRook3[0], newBlackRook3[1] + 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
successors['BlackRook3 - UP: ' + str(moves)] = stateNew
# BlackRook3 DOWN
newBlackRook3 = BlackRook3
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook3, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook3 = (newBlackRook3[0], newBlackRook3[1] + 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
newBlackRook3 = (newBlackRook3[0], newBlackRook3[1] - 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
successors['BlackRook3 - DOWN: ' + str(moves)] = stateNew
# BlackRook3 LEFT
newBlackRook3 = BlackRook3
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook3, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook3 = (newBlackRook3[0] - 1, newBlackRook3[1])
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
newBlackRook3 = (newBlackRook3[0] + 1, newBlackRook3[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
successors['BlackRook3 - LEFT: ' + str(moves)] = stateNew
# BlackRook3 RIGHT
newBlackRook3 = BlackRook3
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
while (not(validityOfBlack(newBlackRook3, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook3 = (newBlackRook3[0] + 1, newBlackRook3[1])
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
newBlackRook3 = (newBlackRook3[0] - 1, newBlackRook3[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, newBlackRook3, BlackRook4)
successors['BlackRook3 - RIGHT: ' + str(moves)] = stateNew
#
#
#
#
# BLACK ROOK 4
#
#
#
# BlackRook4 UP
newBlackRook4 = BlackRook4
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
while (not(validityOfBlack(newBlackRook4, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook4 = (newBlackRook4[0], newBlackRook4[1] - 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
newBlackRook4 = (newBlackRook4[0], newBlackRook4[1] + 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
successors['BlackRook4 - UP: ' + str(moves)] = stateNew
# BlackRook4 DOWN
newBlackRook4 = BlackRook4
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
while (not(validityOfBlack(newBlackRook4, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook4 = (newBlackRook4[0], newBlackRook4[1] + 1)
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
newBlackRook4 = (newBlackRook4[0], newBlackRook4[1] - 1)
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
successors['BlackRook4 - DOWN: ' + str(moves)] = stateNew
# BlackRook4 LEFT
newBlackRook4 = BlackRook4
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
while (not(validityOfBlack(newBlackRook4, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook4 = (newBlackRook4[0] - 1, newBlackRook4[1])
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
newBlackRook4 = (newBlackRook4[0] + 1, newBlackRook4[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
successors['BlackRook4 - LEFT: ' + str(moves)] = stateNew
# BlackRook4 RIGHT
newBlackRook4 = BlackRook4
moves = 0
tempState = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
while (not(validityOfBlack(newBlackRook4, tempState)) and validityWhiteOnWhite(tempState) and validityBlackOnBlack(
tempState)):
newBlackRook4 = (newBlackRook4[0] + 1, newBlackRook4[1])
moves += 1
tempState = (
WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
newBlackRook4 = (newBlackRook4[0] - 1, newBlackRook4[1])
stateNew = (WhiteRook1, WhiteRook2, WhiteRook3, WhiteRook4, BlackRook1, BlackRook2, BlackRook3, newBlackRook4)
successors['BlackRook4 - RIGHT: ' + str(moves)] = stateNew
return successors
Testing = Rooks(((1, 1), (1, 2), (1, 3), (1, 4), (5, 5), (5, 6), (5, 7), (5, 8)))
answer = breadth_first_tree_search(Testing)
print(answer.solution())
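Each of the 32 blocks in successor() differs only in which rook moves and in which direction. A table-driven version removes the duplication; this is a hypothetical refactor of the same slide-until-valid logic, untested against the original search:

DIRECTIONS = {'UP': (0, -1), 'DOWN': (0, 1), 'LEFT': (-1, 0), 'RIGHT': (1, 0)}

def generate_moves(state):
    # One loop over (rook index, direction) replaces the 32 copy-pasted blocks.
    successors = dict()
    for idx in range(8):
        name = ('WhiteRook' if idx < 4 else 'BlackRook') + str(idx % 4 + 1)
        validity = validityOfWhite if idx < 4 else validityOfBlack
        for direction, (dx, dy) in DIRECTIONS.items():
            pos, moves = state[idx], 0
            temp = state[:idx] + (pos,) + state[idx + 1:]
            while (not validity(pos, temp) and validityWhiteOnWhite(temp)
                   and validityBlackOnBlack(temp)):
                pos = (pos[0] + dx, pos[1] + dy)
                moves += 1
                temp = state[:idx] + (pos,) + state[idx + 1:]
            pos = (pos[0] - dx, pos[1] - dy)  # step back to the last valid square
            new_state = state[:idx] + (pos,) + state[idx + 1:]
            successors[name + ' - ' + direction + ': ' + str(moves)] = new_state
    return successors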
| 55.158076
| 135
| 0.633169
| 3,028
| 32,102
| 6.709049
| 0.032695
| 0.071868
| 0.090081
| 0.09648
| 0.900172
| 0.899828
| 0.88506
| 0.88506
| 0.883928
| 0.877824
| 0
| 0.076346
| 0.252103
| 32,102
| 582
| 136
| 55.158076
| 0.769795
| 0.071584
| 0
| 0.784689
| 0
| 0
| 0.020214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021531
| false
| 0
| 0.002392
| 0.009569
| 0.045455
| 0.002392
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1693904e6b58d878c966f5703b0562b8b9d4775d
| 119
|
py
|
Python
|
python/testData/inspections/AddCallSuperOptionalAndRequiredParamsNameCollision_after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/AddCallSuperOptionalAndRequiredParamsNameCollision_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/AddCallSuperOptionalAndRequiredParamsNameCollision_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class A:
def __init__(self, a):
pass
class B(A):
def __init__(self, a=1):
A.__init__(self, a)
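A quick usage sketch (not part of the original test-data file): the default value keeps A.__init__'s required parameter satisfied whether or not a value is passed.

# b = B()    # a defaults to 1 and is forwarded to A.__init__
# b2 = B(5)  # explicit value forwarded instead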
| 14.875
| 28
| 0.537815
| 19
| 119
| 2.736842
| 0.421053
| 0.461538
| 0.519231
| 0.461538
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0.319328
| 119
| 8
| 29
| 14.875
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
16a6de77129242943514b33a5973c458ebe638dc
| 19,810
|
py
|
Python
|
src/undefined/Calculator.py
|
cs107-undefined/cs107-FinalProject
|
d950346a7c677ca5a0e12d103be60f18a29a6d96
|
[
"MIT"
] | 4
|
2021-12-11T22:21:22.000Z
|
2021-12-19T22:01:24.000Z
|
src/undefined/Calculator.py
|
cs107-undefined/cs107-FinalProject
|
d950346a7c677ca5a0e12d103be60f18a29a6d96
|
[
"MIT"
] | 27
|
2021-11-07T17:50:15.000Z
|
2021-12-11T20:43:14.000Z
|
src/undefined/Calculator.py
|
cs107-undefined/cs107-FinalProject
|
d950346a7c677ca5a0e12d103be60f18a29a6d96
|
[
"MIT"
] | 2
|
2021-12-11T21:14:33.000Z
|
2021-12-15T04:32:36.000Z
|
import numpy as np
import sys
# temp solution for directory.
sys.path.append("./src/")
import math
from undefined.UDFunction import UDFunction
from undefined.GraphGenerator import UDGraph
from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc
def cos(udobject):
"""calculate the cosine operation of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function
Raises:
TypeError:raised if input is not compatiable with cosine operation
Returns:
if input is udfunction object,update val and der by cosine operation.
if input is UDGraph object,update notes and function by cosine operation.
if input is int,float,ndarray object,update them in cosine operation by their own types.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
new_val = math.cos(udobject._val)
new_der = - 1 * math.sin(udobject._val) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.cos(udobject._val)
new_der = -1 * np.sin(udobject._val) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.COS
if isinstance(udobject._val, (int, float)):
new_val = math.cos(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.cos(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
return np.cos(udobject)
elif isinstance(udobject, (int, float)):
return math.cos(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def sin(udobject):
"""calculate the sin operation of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with sin operation
Returns:
if input is udfunction object,update val and der by sin operation.
if input is UDGraph object,update notes and function by sin operation.
if input is int,float,ndarray object,update them in sin operation by their own types.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
new_val = math.sin(udobject._val)
new_der = math.cos(udobject._val) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.sin(udobject._val)
new_der = np.cos(udobject._val) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.SIN
if isinstance(udobject._val, (int, float)):
new_val = math.sin(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.sin(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
return np.sin(udobject)
elif isinstance(udobject, (int, float)):
return math.sin(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def tan(udobject):
"""calculate the tangent operation of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with tangent operation
Returns:
if input is udfunction object,update val and der by tangent operation.
if input is UDGraph object,update notes and function by tangent operation.
if input is int,float,ndarray object,update them in tangent operation by their own types.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
check_division_by_zero(math.cos(udobject._val))
new_val = math.tan(udobject._val)
new_der = (1 / (math.cos(udobject._val)) ** 2) * udobject._der
elif isinstance(udobject._val, np.ndarray):
check_division_by_zero(np.cos(udobject._val))
new_val = np.tan(udobject._val)
new_der = (1 / (np.cos(udobject._val)) ** 2) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.TAN
if isinstance(udobject._val, (int, float)):
check_division_by_zero(math.cos(udobject._val))
new_val = math.tan(udobject._val)
elif isinstance(udobject._val, np.ndarray):
check_division_by_zero(np.cos(udobject._val))
new_val = np.tan(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
check_division_by_zero(np.cos(udobject))
return np.tan(udobject)
elif isinstance(udobject, (int, float)):
check_division_by_zero(math.cos(udobject))
return math.tan(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def sinh(udobject):
"""calculate the sinh operation of input
Args:
udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the sinh operation.
"""
return (exp(udobject) - exp(-udobject)) / 2
def cosh(udobject):
"""calculate the cosh operation of input
Args:
udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the cosh operation.
"""
return (exp(udobject) + exp(-udobject)) / 2
def tanh(udobject):
"""calculate the tanh operation of input
Args:
udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the tanh operation.
"""
return sinh(udobject) / cosh(udobject)
def coth(udobject):
"""calculate the coth operation of input
Args:
udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the coth operation.
"""
return cosh(udobject) / sinh(udobject)
def sech(udobject):
"""calculate the sech operation of input
Args:
udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the sech operation.
"""
return 1 / cosh(udobject)
def csch(udobject):
"""calculate the csch operation of input
Args:
udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the csch operation.
"""
return 1 / sinh(udobject)
def arccos(udobject):
"""calculate the arccos operation of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with arccos operation
Returns:
if input is udfunction object,update val and der by arccos operation.
if input is UDGraph object,update notes and function by arccos operation.
if input is int,float,ndarray object,update them in arccos operation by their own types.
"""
if isinstance(udobject, UDFunction):
check_arc(udobject._val)
if isinstance(udobject._val, (int, float)):
new_val = math.acos(udobject._val)
new_der = (-1 / math.sqrt(1 - udobject._val**2)) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.arccos(udobject._val)
new_der = (-1 / np.sqrt(1 - udobject._val**2)) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
check_arc(udobject._val)
new_func = UDPrimitive.ACOS
if isinstance(udobject._val, (int, float)):
new_val = math.acos(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.arccos(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
check_arc(udobject)
return np.arccos(udobject)
elif isinstance(udobject, (int, float)):
check_arc(udobject)
return math.acos(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def arcsin(udobject):
"""calculate the arcsin operation of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with arcsin operation
Returns:
if input is udfunction object,update val and der by arcsin operation.
if input is UDGraph object,update notes and function by arcsin operation.
if input is int,float,ndarray object,update them in arcsin operation by their own types.
"""
if isinstance(udobject, UDFunction):
check_arc(udobject._val)
if isinstance(udobject._val, (int, float)):
new_val = math.asin(udobject._val)
new_der = (1 / math.sqrt(1 - udobject._val**2)) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.arcsin(udobject._val)
new_der = (1 / np.sqrt(1 - udobject._val**2)) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
check_arc(udobject._val)
new_func = UDPrimitive.ASIN
if isinstance(udobject._val, (int, float)):
new_val = math.asin(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.arcsin(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
check_arc(udobject)
return np.arcsin(udobject)
elif isinstance(udobject, (int, float)):
check_arc(udobject)
return math.asin(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def arctan(udobject):
"""calculate the arctan operation of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with arctan operation
Returns:
if input is udfunction object,update val and der by arctan operation.
if input is UDGraph object,update notes and function by arctan operation.
if input is int,float,ndarray object,update them in arctan operation by their own types.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
new_val = math.atan(udobject._val)
new_der = (1 / (1 + udobject._val ** 2)) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.arctan(udobject._val)
new_der = (1 / (1 + udobject._val ** 2)) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.ATAN
if isinstance(udobject._val, (int, float)):
new_val = math.atan(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.arctan(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
return np.arctan(udobject)
elif isinstance(udobject, (int, float)):
return math.atan(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def sqrt(udobject):
"""calculate the square root operation of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with square root operation
Returns:
if input is udfunction object,update val and der by square root operation.
if input is UDGraph object,update notes and function by square root operation.
if input is int,float,ndarray object,update them in square root operation by their own types.
"""
if isinstance(udobject, UDFunction):
check_pow(udobject._val, 0.5)
if isinstance(udobject._val, (int, float)):
new_val = math.sqrt(udobject._val)
new_der = 0.5 * math.pow(udobject._val, -0.5) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.sqrt(udobject._val)
new_der = 0.5 * np.power(udobject._val, -0.5) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
check_pow(udobject._val, 0.5)
new_func = UDPrimitive.SQRT
if isinstance(udobject._val, (int, float)):
new_val = math.sqrt(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.sqrt(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
check_pow(udobject, 0.5)
return np.sqrt(udobject)
elif isinstance(udobject, (int, float)):
check_pow(udobject, 0.5)
return math.sqrt(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def exp(udobject):
"""calculate the square exponential of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with exponential operation
Returns:
if input is udfunction object,update val and der by exponential operation.
if input is UDGraph object,update notes and function by exponential operation.
if input is int,float,ndarray object,update them in exponential operation by their own types.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
new_val = math.exp(udobject._val)
new_der = math.exp(udobject._val) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.exp(udobject._val)
new_der = np.exp(udobject._val) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.EXP
if isinstance(udobject._val, (int, float)):
new_val = math.exp(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.exp(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
return np.exp(udobject)
elif isinstance(udobject, (int, float)):
return math.exp(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def standard_logistic(udobject):
"""this is the function we calculate the standard logistic.
It is different than the log() function
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Returns:
return the standard logistic results.
"""
return 1 / (1 + exp(-udobject))
def log(udobject, base=math.e):
"""calculate the log of input.
We can handle the any bases in this log. Users can pass in the base argument.
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with log operation
Returns:
if input is udfunction object,update val and der by log operation.
if input is UDGraph object,update notes and function by log operation.
if input is int,float,ndarray object,update them in log operation by their own types.
"""
if isinstance(udobject, UDFunction):
check_log(udobject._val, base)
if isinstance(udobject._val, (int, float)):
new_val = math.log(udobject._val, base)
new_der = 1 / (math.log(base) * udobject._val) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.log(udobject._val)
new_val = new_val / math.log(base)
new_der = 1 / (math.log(base) * udobject._val) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
check_log(udobject._val, base)
new_func = UDPrimitive.LOG
if isinstance(udobject._val, (int, float)):
new_val = math.log(udobject._val, base)
elif isinstance(udobject._val, np.ndarray):
new_val = np.log(udobject._val) / math.log(base)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
udgraph._params["base"] = base
return udgraph
elif isinstance(udobject, np.ndarray):
check_log(udobject, base)
return np.log(udobject) / math.log(base)
elif isinstance(udobject, (int, float)):
check_log(udobject, base)
return math.log(udobject, base)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
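A quick end-to-end check of the forward mode implemented above. UDFunction is constructed here as UDFunction(val, der), matching the calls inside this module; seeding der = 1 makes ._der the derivative with respect to x. This is a sketch, assuming the private attributes behave exactly as used above:

if __name__ == "__main__":
    # Forward-mode sanity check: d/dx sin(x) = cos(x), evaluated at x = 0.5.
    x = UDFunction(0.5, 1)           # value 0.5, seed derivative 1
    y = sin(x)
    print(y._val, math.sin(0.5))     # both ~0.4794
    print(y._der, math.cos(0.5))     # both ~0.8776
    # log with a custom base: d/dx log2(x) = 1 / (x ln 2).
    z = log(UDFunction(8.0, 1), base=2)
    print(z._val)                    # 3.0
    print(z._der, 1 / (8.0 * math.log(2)))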
| 36.348624
| 107
| 0.660676
| 2,437
| 19,810
| 5.258925
| 0.049241
| 0.087547
| 0.077247
| 0.048455
| 0.887016
| 0.876248
| 0.854947
| 0.846442
| 0.821239
| 0.81211
| 0
| 0.003232
| 0.250328
| 19,810
| 544
| 108
| 36.415441
| 0.85974
| 0.30101
| 0
| 0.740741
| 0
| 0
| 0.110575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053872
| false
| 0
| 0.020202
| 0
| 0.218855
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16c902d89e8420dbef8117fb4d5b99c470f340cb
| 45
|
py
|
Python
|
droput_message/droput_msg/__init__.py
|
hosein-yousefii/DROPUT
|
99a714f03a92b14228a3691ca6568ece0f0ea48c
|
[
"Apache-2.0"
] | 2
|
2022-03-17T08:08:07.000Z
|
2022-03-17T21:38:54.000Z
|
droput_message/droput_msg/__init__.py
|
hosein-yousefii/DROPUT
|
99a714f03a92b14228a3691ca6568ece0f0ea48c
|
[
"Apache-2.0"
] | null | null | null |
droput_message/droput_msg/__init__.py
|
hosein-yousefii/DROPUT
|
99a714f03a92b14228a3691ca6568ece0f0ea48c
|
[
"Apache-2.0"
] | null | null | null |
from droput_msg.droput_msg import create_app
| 22.5
| 44
| 0.888889
| 8
| 45
| 4.625
| 0.75
| 0.486486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
16ce40272b45ec1dc6e62dbf84336360c1038014
| 311
|
py
|
Python
|
infcommon/docker_compose/factory.py
|
aleasoluciones/infcommon
|
cdd64dacba6b1219e511b3410168434080c668da
|
[
"MIT"
] | null | null | null |
infcommon/docker_compose/factory.py
|
aleasoluciones/infcommon
|
cdd64dacba6b1219e511b3410168434080c668da
|
[
"MIT"
] | 1
|
2021-03-26T09:16:07.000Z
|
2021-03-26T09:16:07.000Z
|
infcommon/docker_compose/factory.py
|
aleasoluciones/infcommon3
|
5be559b741ec447ad54ec232efa013f2fb3af18a
|
[
"MIT"
] | null | null | null |
from infcommon.factory import Factory
from infcommon.docker_compose.docker_compose import DockerComposeService
def docker_compose_service(base_dir=None, docker_compose_file_name=None):
return Factory.instance('docker_compose_service', lambda: DockerComposeService(base_dir, docker_compose_file_name))
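A usage sketch. Factory.instance appears to memoize by its string key, so repeated calls should hand back the same DockerComposeService; the paths below are illustrative and the caching behaviour is an assumption:

# service = docker_compose_service(base_dir='.', docker_compose_file_name='docker-compose.yml')
# same_service = docker_compose_service()
# assert service is same_service  # assuming Factory.instance caches by key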
| 44.428571
| 123
| 0.855305
| 39
| 311
| 6.461538
| 0.435897
| 0.309524
| 0.15873
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083601
| 311
| 6
| 124
| 51.833333
| 0.884211
| 0
| 0
| 0
| 0
| 0
| 0.07074
| 0.07074
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
bc5e90e9733f74102bf0de3613807a3bca55980c
| 34
|
py
|
Python
|
gunicorn_config.py
|
bbc/connected-data-mistletoe
|
6c6a5e4137d1965261b18e7ea42bca0e313c49a6
|
[
"MIT"
] | null | null | null |
gunicorn_config.py
|
bbc/connected-data-mistletoe
|
6c6a5e4137d1965261b18e7ea42bca0e313c49a6
|
[
"MIT"
] | null | null | null |
gunicorn_config.py
|
bbc/connected-data-mistletoe
|
6c6a5e4137d1965261b18e7ea42bca0e313c49a6
|
[
"MIT"
] | null | null | null |
bind = "0.0.0.0:5004"
workers = 2
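With this file in place, gunicorn can be pointed at it via its --config flag; the app module below is a placeholder, not taken from the original repo:

# Example invocation (app module is a placeholder):
#   gunicorn --config gunicorn_config.py myproject.wsgi:application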
| 11.333333
| 21
| 0.588235
| 8
| 34
| 2.5
| 0.625
| 0.3
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.321429
| 0.176471
| 34
| 2
| 22
| 17
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc71acb6f4e50529ee5875ae8ef8acc591f077c2
| 6,545
|
py
|
Python
|
Utils/Data/Features/Generated/EngagerFeature/EngagerKnowTweetLanguage.py
|
MaurizioFD/recsys-challenge-2020-twitter
|
95dc024fb4f8777aa62e1304536daece640428de
|
[
"Apache-2.0"
] | 44
|
2020-07-09T11:31:17.000Z
|
2022-03-04T05:50:48.000Z
|
Utils/Data/Features/Generated/EngagerFeature/EngagerKnowTweetLanguage.py
|
kiminh/recsys-challenge-2020-twitter
|
567f0db40be7db3d21c360f2ca6cdf2addc7c698
|
[
"Apache-2.0"
] | 3
|
2020-10-02T18:55:21.000Z
|
2020-10-13T22:13:58.000Z
|
Utils/Data/Features/Generated/EngagerFeature/EngagerKnowTweetLanguage.py
|
kiminh/recsys-challenge-2020-twitter
|
567f0db40be7db3d21c360f2ca6cdf2addc7c698
|
[
"Apache-2.0"
] | 9
|
2020-08-08T14:55:59.000Z
|
2021-09-06T09:17:03.000Z
|
from Utils.Data.DatasetUtils import is_test_or_val_set, get_train_set_id_from_test_or_val_set
from Utils.Data.Features.Generated.TweetFeature.IsEngagementType import *
from Utils.Data.Features.MappedFeatures import *
class EngagerFeatureKnowTweetLanguage(GeneratedFeaturePickle):
def __init__(self, dataset_id: str):
super().__init__("engager_feature_know_tweet_language", dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/know_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/know_language/{self.feature_name}.csv.gz")
def create_feature(self):
if is_test_or_val_set(self.dataset_id):
train_dataset_id = get_train_set_id_from_test_or_val_set(self.dataset_id)
# Load the necessary features
creator_id_feature = MappedFeatureCreatorId(train_dataset_id)
engager_id_feature = MappedFeatureEngagerId(train_dataset_id)
language_id_feature = MappedFeatureTweetLanguage(train_dataset_id)
engagement_feature = TweetFeatureEngagementIsLike(train_dataset_id)
# Load the dataframes
creator_id_df = creator_id_feature.load_or_create()
engager_id_df = engager_id_feature.load_or_create()
language_id_df = language_id_feature.load_or_create()
engagement_df = engagement_feature.load_or_create()
# Concatenate the dataframes
dataframe = pd.concat([
creator_id_df,
engager_id_df,
language_id_df,
engagement_df
],
axis=1
)
# Filter the negative interactions
positive_dataframe = dataframe[dataframe[engagement_feature.feature_name]]
# Let's compute the known language when the user is creator
dictionary_creator_df = pd.DataFrame(positive_dataframe[[
creator_id_feature.feature_name,
language_id_feature.feature_name,
engagement_feature.feature_name
]].groupby([creator_id_feature.feature_name, language_id_feature.feature_name]).first())
dictionary_creator_df.columns = ['users']
dictionary_creator = dictionary_creator_df.to_dict()['users']
# Let's compute the known language when the user is engager
dictionary_engager_df = pd.DataFrame(positive_dataframe[[
engager_id_feature.feature_name,
language_id_feature.feature_name,
engagement_feature.feature_name
]].groupby([engager_id_feature.feature_name, language_id_feature.feature_name]).first())
dictionary_engager_df.columns = ['users']
dictionary_engager = dictionary_engager_df.to_dict()['users']
# Merge the two dictionaries
dictionary_user = {**dictionary_creator, **dictionary_engager}
# Load the test information
test_engager_id_feature = MappedFeatureEngagerId(self.dataset_id)
test_tweet_language_feature = MappedFeatureTweetLanguage(self.dataset_id)
test_engager_id_df = test_engager_id_feature.load_or_create()
test_tweet_language_df = test_tweet_language_feature.load_or_create()
test_dataframe = pd.concat([
test_engager_id_df,
test_tweet_language_df
],
axis=1
)
# Apply the merged (user, language) dictionary
result_df = pd.DataFrame(
test_dataframe[[
engager_id_feature.feature_name,
language_id_feature.feature_name
]].apply(lambda x: dictionary_user.get((x[0], x[1]), False), axis=1))
# Save back the dataframe
self.save_feature(result_df)
else:
# Load the necessary features
creator_id_feature = MappedFeatureCreatorId(self.dataset_id)
engager_id_feature = MappedFeatureEngagerId(self.dataset_id)
language_id_feature = MappedFeatureTweetLanguage(self.dataset_id)
engagement_feature = TweetFeatureEngagementIsLike(self.dataset_id)
# Load the dataframes
creator_id_df = creator_id_feature.load_or_create()
engager_id_df = engager_id_feature.load_or_create()
language_id_df = language_id_feature.load_or_create()
engagement_df = engagement_feature.load_or_create()
# Concatenate the dataframes
dataframe = pd.concat([
creator_id_df,
engager_id_df,
language_id_df,
engagement_df
],
axis=1
)
            # Keep only the positive (liked) interactions
positive_dataframe = dataframe[dataframe[engagement_feature.feature_name]]
# Let's compute the known language when the user is creator
dictionary_creator_df = pd.DataFrame(positive_dataframe[[
creator_id_feature.feature_name,
language_id_feature.feature_name,
engagement_feature.feature_name
]].groupby([creator_id_feature.feature_name, language_id_feature.feature_name]).first())
dictionary_creator_df.columns = ['users']
dictionary_creator = dictionary_creator_df.to_dict()['users']
# Let's compute the known language when the user is engager
dictionary_engager_df = pd.DataFrame(positive_dataframe[[
engager_id_feature.feature_name,
language_id_feature.feature_name,
engagement_feature.feature_name
]].groupby([engager_id_feature.feature_name, language_id_feature.feature_name]).first())
dictionary_engager_df.columns = ['users']
dictionary_engager = dictionary_engager_df.to_dict()['users']
# Merge the two dictionaries
dictionary_user = {**dictionary_creator, **dictionary_engager}
            # Look up each (engager, language) pair; unseen pairs default to False
result_df = pd.DataFrame(
dataframe[[
engager_id_feature.feature_name,
language_id_feature.feature_name
]].apply(lambda x: dictionary_user.get((x[0], x[1]), False), axis=1))
# Save back the dataframe
self.save_feature(result_df)
| 42.777778
| 104
| 0.645531
| 707
| 6,545
| 5.561528
| 0.128713
| 0.077823
| 0.119023
| 0.101729
| 0.879451
| 0.81765
| 0.772126
| 0.736521
| 0.704985
| 0.666328
| 0
| 0.001922
| 0.284645
| 6,545
| 152
| 105
| 43.059211
| 0.83789
| 0.097937
| 0
| 0.66
| 1
| 0
| 0.042658
| 0.03586
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.03
| 0
| 0.06
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc878241629133623a92adcd68bc5a4b335aa562
| 24,925
|
py
|
Python
|
unisender/south_migrations/0001_initial.py
|
ITCase-django/django-unisender
|
d9d269cb5074967c22a756bff01db48c94b044bc
|
[
"MIT"
] | 2
|
2015-04-09T13:16:41.000Z
|
2017-12-06T10:07:09.000Z
|
unisender/south_migrations/0001_initial.py
|
ITCase-django/django-unisender
|
d9d269cb5074967c22a756bff01db48c94b044bc
|
[
"MIT"
] | 2
|
2015-01-20T12:02:50.000Z
|
2017-04-07T07:16:48.000Z
|
unisender/south_migrations/0001_initial.py
|
ITCase-django/django-unisender
|
d9d269cb5074967c22a756bff01db48c94b044bc
|
[
"MIT"
] | 1
|
2022-02-22T13:34:40.000Z
|
2022-02-22T13:34:40.000Z
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
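    # Initial South schema migration: creates the django-unisender tables and
    # their M2M join tables.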
def forwards(self, orm):
# Adding model 'Tag'
db.create_table(u'unisender_tag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('unisender_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('sync', self.gf('django.db.models.fields.BooleanField')(default=False)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'unisender', ['Tag'])
# Adding model 'Field'
db.create_table(u'unisender_field', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('unisender_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('sync', self.gf('django.db.models.fields.BooleanField')(default=False)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('field_type', self.gf('django.db.models.fields.CharField')(default='string', max_length=50)),
('visible', self.gf('django.db.models.fields.BooleanField')(default=True)),
('sort', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
))
db.send_create_signal(u'unisender', ['Field'])
# Adding model 'SubscribeList'
db.create_table(u'unisender_subscribelist', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('unisender_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('sync', self.gf('django.db.models.fields.BooleanField')(default=False)),
('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('before_subscribe_url', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('after_subscribe_url', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal(u'unisender', ['SubscribeList'])
# Adding model 'Subscriber'
db.create_table(u'unisender_subscriber', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('unisender_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('sync', self.gf('django.db.models.fields.BooleanField')(default=False)),
('contact_type', self.gf('django.db.models.fields.CharField')(default='email', max_length=50)),
('contact', self.gf('django.db.models.fields.CharField')(max_length=255)),
('double_optin', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
))
db.send_create_signal(u'unisender', ['Subscriber'])
# Adding M2M table for field list_ids on 'Subscriber'
m2m_table_name = db.shorten_name(u'unisender_subscriber_list_ids')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('subscriber', models.ForeignKey(orm[u'unisender.subscriber'], null=False)),
('subscribelist', models.ForeignKey(orm[u'unisender.subscribelist'], null=False))
))
db.create_unique(m2m_table_name, ['subscriber_id', 'subscribelist_id'])
# Adding M2M table for field tags on 'Subscriber'
m2m_table_name = db.shorten_name(u'unisender_subscriber_tags')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('subscriber', models.ForeignKey(orm[u'unisender.subscriber'], null=False)),
('tag', models.ForeignKey(orm[u'unisender.tag'], null=False))
))
db.create_unique(m2m_table_name, ['subscriber_id', 'tag_id'])
# Adding model 'SubscriberFields'
db.create_table(u'unisender_subscriberfields', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('subscriber', self.gf('django.db.models.fields.related.ForeignKey')(related_name='fields', to=orm['unisender.Subscriber'])),
('field', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['unisender.Field'])),
('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'unisender', ['SubscriberFields'])
# Adding model 'EmailMessage'
db.create_table(u'unisender_emailmessage', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('unisender_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('sync', self.gf('django.db.models.fields.BooleanField')(default=False)),
('sender_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('sender_email', self.gf('django.db.models.fields.CharField')(max_length=255)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=255)),
('body', self.gf('tinymce_4.fields.TinyMCEModelField')()),
('list_id', self.gf('django.db.models.fields.related.ForeignKey')(related_name='emails', to=orm['unisender.SubscribeList'])),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='emails', null=True, to=orm['unisender.Tag'])),
('lang', self.gf('django.db.models.fields.CharField')(default='ru', max_length=50)),
('text_body', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('generate_text', self.gf('django.db.models.fields.CharField')(default='1', max_length=50)),
('wrap_type', self.gf('django.db.models.fields.CharField')(default='skip', max_length=50)),
('categories', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('series_day', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('series_time', self.gf('django.db.models.fields.TimeField')(default=datetime.datetime(2014, 7, 8, 0, 0))),
))
db.send_create_signal(u'unisender', ['EmailMessage'])
# Adding model 'SmsMessage'
db.create_table(u'unisender_smsmessage', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('unisender_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('sync', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'unisender', ['SmsMessage'])
# Adding model 'Campaign'
db.create_table(u'unisender_campaign', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('unisender_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('last_error', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('sync', self.gf('django.db.models.fields.BooleanField')(default=False)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('email_message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['unisender.EmailMessage'])),
('start_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('track_read', self.gf('django.db.models.fields.CharField')(default='0', max_length=50)),
('track_links', self.gf('django.db.models.fields.CharField')(default='0', max_length=50)),
('track_ga', self.gf('django.db.models.fields.CharField')(default='0', max_length=50)),
('payment_limit', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default=None, max_length=50, null=True, blank=True)),
('last_check', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('not_sent', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('ok_delivered', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('ok_read', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('ok_spam_folder', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('ok_link_visited', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('ok_unsubscribed', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_user_unknown', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_user_inactive', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_mailbox_full', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_spam_rejected', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_spam_folder', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_delivery_failed', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_will_retry', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_resend', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_domain_inactive', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_skip_letter', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_spam_skipped', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_spam_retry', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_unsubscribed', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_src_invalid', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_dest_invalid', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_not_allowed', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_not_available', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_lost', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('err_internal', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('total', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
))
db.send_create_signal(u'unisender', ['Campaign'])
# Adding M2M table for field contacts on 'Campaign'
m2m_table_name = db.shorten_name(u'unisender_campaign_contacts')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('campaign', models.ForeignKey(orm[u'unisender.campaign'], null=False)),
('subscriber', models.ForeignKey(orm[u'unisender.subscriber'], null=False))
))
db.create_unique(m2m_table_name, ['campaign_id', 'subscriber_id'])
def backwards(self, orm):
# Deleting model 'Tag'
db.delete_table(u'unisender_tag')
# Deleting model 'Field'
db.delete_table(u'unisender_field')
# Deleting model 'SubscribeList'
db.delete_table(u'unisender_subscribelist')
# Deleting model 'Subscriber'
db.delete_table(u'unisender_subscriber')
# Removing M2M table for field list_ids on 'Subscriber'
db.delete_table(db.shorten_name(u'unisender_subscriber_list_ids'))
# Removing M2M table for field tags on 'Subscriber'
db.delete_table(db.shorten_name(u'unisender_subscriber_tags'))
# Deleting model 'SubscriberFields'
db.delete_table(u'unisender_subscriberfields')
# Deleting model 'EmailMessage'
db.delete_table(u'unisender_emailmessage')
# Deleting model 'SmsMessage'
db.delete_table(u'unisender_smsmessage')
# Deleting model 'Campaign'
db.delete_table(u'unisender_campaign')
# Removing M2M table for field contacts on 'Campaign'
db.delete_table(db.shorten_name(u'unisender_campaign_contacts'))
models = {
u'unisender.campaign': {
'Meta': {'ordering': "('name',)", 'object_name': 'Campaign'},
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'campaign'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['unisender.Subscriber']"}),
'email_message': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['unisender.EmailMessage']"}),
'err_delivery_failed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_dest_invalid': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_domain_inactive': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_internal': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_lost': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_mailbox_full': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_not_allowed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_not_available': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_resend': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_skip_letter': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_spam_folder': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_spam_rejected': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_spam_retry': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_spam_skipped': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_src_invalid': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_unsubscribed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_user_inactive': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_user_unknown': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_will_retry': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_check': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'not_sent': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_delivered': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_link_visited': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_read': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_spam_folder': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_unsubscribed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'payment_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'total': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'track_ga': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '50'}),
'track_links': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '50'}),
'track_read': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '50'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'unisender.emailmessage': {
'Meta': {'ordering': "('subject',)", 'object_name': 'EmailMessage'},
'body': ('tinymce_4.fields.TinyMCEModelField', [], {}),
'categories': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'generate_text': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'default': "'ru'", 'max_length': '50'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'list_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emails'", 'to': u"orm['unisender.SubscribeList']"}),
'sender_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sender_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'series_day': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'series_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2014, 7, 8, 0, 0)'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'emails'", 'null': 'True', 'to': u"orm['unisender.Tag']"}),
'text_body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'wrap_type': ('django.db.models.fields.CharField', [], {'default': "'skip'", 'max_length': '50'})
},
u'unisender.field': {
'Meta': {'ordering': "('name',)", 'object_name': 'Field'},
'field_type': ('django.db.models.fields.CharField', [], {'default': "'string'", 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'unisender.smsmessage': {
'Meta': {'object_name': 'SmsMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'unisender.subscribelist': {
'Meta': {'ordering': "('title',)", 'object_name': 'SubscribeList'},
'after_subscribe_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'before_subscribe_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'unisender.subscriber': {
'Meta': {'ordering': "('contact',)", 'object_name': 'Subscriber'},
'contact': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'contact_type': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '50'}),
'double_optin': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'list_ids': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'subscribers'", 'symmetrical': 'False', 'to': u"orm['unisender.SubscribeList']"}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscribers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['unisender.Tag']"}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'unisender.subscriberfields': {
'Meta': {'object_name': 'SubscriberFields'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['unisender.Field']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['unisender.Subscriber']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'unisender.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['unisender']
| 76.457055
| 207
| 0.619699
| 2,786
| 24,925
| 5.409189
| 0.059943
| 0.097678
| 0.170007
| 0.242867
| 0.880358
| 0.834041
| 0.82349
| 0.79509
| 0.758593
| 0.688985
| 0
| 0.014124
| 0.1734
| 24,925
| 326
| 208
| 76.457055
| 0.717323
| 0.030211
| 0
| 0.301818
| 0
| 0
| 0.512485
| 0.325562
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007273
| false
| 0
| 0.014545
| 0
| 0.032727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bcb3440d2bbb687c791235134ad1d156c938355d
| 148
|
py
|
Python
|
delft3d/__init__.py
|
Carlisle345748/Delft3D-Toolbox
|
47267205c7a5b442daf35f3a5d2aaf8d3fc76ff0
|
[
"MIT"
] | 10
|
2020-05-12T13:38:23.000Z
|
2022-02-24T09:49:10.000Z
|
delft3d/__init__.py
|
Carlisle345748/Delft3D-Toolbox
|
47267205c7a5b442daf35f3a5d2aaf8d3fc76ff0
|
[
"MIT"
] | 1
|
2021-05-27T15:42:41.000Z
|
2021-06-04T13:27:48.000Z
|
delft3d/__init__.py
|
Carlisle345748/Delft3D-Toolbox
|
47267205c7a5b442daf35f3a5d2aaf8d3fc76ff0
|
[
"MIT"
] | 2
|
2021-08-31T16:54:08.000Z
|
2022-02-15T22:50:26.000Z
|
from .GrdFile import *
from .MdfFile import *
from .TimeSeriesFile import *
from .DepFile import *
from .Simulation import *
| 21.142857
| 29
| 0.756757
| 18
| 148
| 6.222222
| 0.388889
| 0.446429
| 0.303571
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 148
| 6
| 30
| 24.666667
| 0.903226
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4c211025471acb13f56994d9a1db3420c72f7fd9
| 12,383
|
py
|
Python
|
CodeIA/venv/Lib/site-packages/coremltools/converters/mil/mil/passes/test_noop_elimination.py
|
Finasty-lab/IA-Python
|
286113504906fec11a5aa5fd1d12e38536b1c859
|
[
"Apache-2.0"
] | 3
|
2018-10-02T17:23:01.000Z
|
2020-08-15T04:47:07.000Z
|
coremltools/converters/mil/mil/passes/test_noop_elimination.py
|
holzschu/coremltools
|
5ece9069a1487d5083f00f56afe07832d88e3dfa
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/mil/mil/passes/test_noop_elimination.py
|
holzschu/coremltools
|
5ece9069a1487d5083f00f56afe07832d88e3dfa
|
[
"BSD-3-Clause"
] | 1
|
2021-05-07T15:38:20.000Z
|
2021-05-07T15:38:20.000Z
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
assert_model_is_valid,
get_op_types_in_program,
apply_pass_and_basic_check,
)
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
import copy
import pytest
import itertools
import numpy as np
@pytest.mark.parametrize("op_type, pos, val", itertools.product(['add', 'mul', 'floor_div', 'pow', 'real_div', 'sub'], ['x', 'y'], [0, 1, [0, 0, 0, 0], [1, 1, 1, 1]]))
def test_elementwise_elimination(op_type, pos, val):
    # Division by zero can never be a valid no-op candidate; skip those combinations.
    if 'div' in op_type and np.prod(val) == 0:
        return
    # For pow, only exercise the scalar cases 0 and 1; skip list-valued operands.
    if 'pow' in op_type and val not in (0, 1):
        return
test_op = getattr(mb, op_type)
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
if pos == "x":
r1 = test_op(x=val, y=x)
else:
r1 = test_op(x=x, y=val)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
original_program = [op_type, "relu"]
new_program = original_program
if op_type in {'add'}:
if val == 0 or val == [0, 0, 0, 0]:
new_program = ["relu"]
elif op_type in {'mul'}:
if val == 1 or val == [1, 1, 1, 1]:
new_program = ["relu"]
elif op_type in {'pow', 'real_div', 'floor_div'}:
if pos == 'y' and (val == 1 or val == [1, 1, 1, 1]):
new_program = ["relu"]
elif op_type in {'sub'}:
if pos == 'y' and (val == 0 or val == [0, 0, 0, 0]):
new_program = ["relu"]
assert get_op_types_in_program(prev_prog) == original_program
assert get_op_types_in_program(prog) == new_program
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_elementwise_broadcast():
@mb.program(input_specs=[mb.TensorSpec(shape=[4])])
def prog(x):
r1 = mb.add(x=x, y=[[0, 0, 0, 0], [0, 0, 0, 0]])
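        # Adding zeros that broadcast (4,) -> (2, 4) changes the shape,
        # so this add must not be eliminated.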
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
original_program = ["add", "relu"]
assert get_op_types_in_program(prev_prog) == original_program
assert get_op_types_in_program(prog) == original_program
assert_model_is_valid(
prog,
{"x": [4]},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_reshape_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.reshape(x=x, shape=[1, 8])
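        # The second reshape maps (1, 8) -> (1, 8), a pure no-op for the pass to remove.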
r2 = mb.reshape(x=r1, shape=[1, 8])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "relu"]
assert get_op_types_in_program(prog) == ["reshape", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (1, 8)},
)
def test_oneway_split_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.split(x=x, num_splits=1, axis=-1)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["split", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_full_split_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.split(x=x, split_sizes=[4], axis=-1)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["split", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebysize_full_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[2, 4])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebysize_to_end_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[-1, -1])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebyindex_full_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_index(x=x, begin=[0, 0], end=[2, 4])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
@pytest.mark.parametrize("begin_mask, end_mask",
itertools.product(itertools.product([True, False],[True, False]),
itertools.product([True, False],[True, False])))
def test_slicebyindex_mask_elimination(begin_mask, end_mask):
@mb.program(input_specs=[mb.TensorSpec(shape=(4, 4))])
def prog(x):
begin = [1, 1]
end = [1, 1]
for i in range(2):
if not begin_mask[i]:
begin[i] = 0
if not end_mask[i]:
end[i] = 4
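        # Every mask combination yields an effective full-tensor slice,
        # so the slice_by_index op is always a no-op here.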
r1 = mb.slice_by_index(x=x, begin=begin, end=end, begin_mask=begin_mask, end_mask=end_mask)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (4, 4)},
expected_output_shapes={block.outputs[0].name: (4, 4)},
)
def test_pad_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.pad(x=x, pad=[0, 0, 0, 0])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_keep_pad():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.pad(x=x, pad=[4, 4, 2, 2])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
assert get_op_types_in_program(prog) == ["pad", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (10, 8)},
)
def test_tile_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.tile(x=x, reps=[1, 1])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["tile", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_keep_tile():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.tile(x=x, reps=[2, 2])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["tile", "relu"]
assert get_op_types_in_program(prog) == ["tile", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (4, 8)},
)
def test_upsample_nearest_neighbor_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.upsample_nearest_neighbor(x=x)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["upsample_nearest_neighbor", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_upsample_bilinear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.upsample_bilinear(x=x)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["upsample_bilinear", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_resize_bilinear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.resize_bilinear(x=x, target_size_height=2, target_size_width=4)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["resize_bilinear", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_crop_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.crop(x=x, crop_height=[0, 0], crop_width=[0, 0])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["crop", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_linear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.linear_activation(x=x, alpha=1.0, beta=0.0)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["linear_activation", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
| 32.41623
| 167
| 0.616652
| 1,809
| 12,383
| 3.928137
| 0.085683
| 0.01351
| 0.052069
| 0.062482
| 0.8179
| 0.798902
| 0.787644
| 0.780045
| 0.760062
| 0.754433
| 0
| 0.028523
| 0.229912
| 12,383
| 381
| 168
| 32.501312
| 0.716653
| 0.016797
| 0
| 0.585443
| 0
| 0
| 0.07356
| 0.037561
| 0
| 0
| 0
| 0
| 0.174051
| 1
| 0.113924
| false
| 0.063291
| 0.022152
| 0
| 0.199367
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
d5e9e8d1f0c3dea7916a54d1d4cb4c82992a8ba7
| 188
|
py
|
Python
|
tacotron2_model/__init__.py
|
BenAAndrew/tacotron2-model
|
cd2aaf605f94e97225319fbf876e4213ae517b40
|
[
"BSD-3-Clause"
] | 4
|
2021-01-24T22:55:13.000Z
|
2021-08-11T12:36:53.000Z
|
tacotron2_model/__init__.py
|
BenAAndrew/tacotron2-model
|
cd2aaf605f94e97225319fbf876e4213ae517b40
|
[
"BSD-3-Clause"
] | null | null | null |
tacotron2_model/__init__.py
|
BenAAndrew/tacotron2-model
|
cd2aaf605f94e97225319fbf876e4213ae517b40
|
[
"BSD-3-Clause"
] | null | null | null |
from tacotron2_model.model import Tacotron2
from tacotron2_model.loss import Tacotron2Loss
from tacotron2_model.collate import TextMelCollate
from tacotron2_model.stft import TacotronSTFT
| 37.6
| 50
| 0.893617
| 24
| 188
| 6.833333
| 0.416667
| 0.317073
| 0.439024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034884
| 0.085106
| 188
| 4
| 51
| 47
| 0.918605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d5f15b339269de91803c06694887b8a3333ed84a
| 359,776
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/layout/scene/__init__.py
|
pragyagarg642/plotly.py
|
141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b
|
[
"MIT"
] | 2
|
2020-04-11T19:28:30.000Z
|
2020-05-04T03:16:20.000Z
|
packages/python/plotly/plotly/graph_objs/layout/scene/__init__.py
|
pragyagarg642/plotly.py
|
141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/layout/scene/__init__.py
|
pragyagarg642/plotly.py
|
141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class ZAxis(_BaseLayoutHierarchyType):
# autorange
# ---------
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided, then `autorange` is set to False.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed']
Returns
-------
Any
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
# backgroundcolor
# ---------------
@property
def backgroundcolor(self):
"""
Sets the background color of this axis' wall.
The 'backgroundcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["backgroundcolor"]
@backgroundcolor.setter
def backgroundcolor(self, val):
self["backgroundcolor"] = val
# calendar
# --------
@property
def calendar(self):
"""
Sets the calendar system to use for `range` and `tick0` if this
is a date axis. This does not set the calendar for interpreting
        data on this axis; that is specified in the trace or via the
        global `layout.calendar`.
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["calendar"]
@calendar.setter
def calendar(self, val):
self["calendar"] = val
# categoryarray
# -------------
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
# categoryarraysrc
# ----------------
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
        categoryarray.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
# categoryorder
# -------------
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean or median of all the
values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
# color
# -----
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# gridcolor
# ---------
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
# gridwidth
# ---------
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
# hoverformat
# -----------
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
# linecolor
# ---------
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
# linewidth
# ---------
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
# mirror
# ------
@property
def mirror(self):
"""
Determines if the axis lines or/and ticks are mirrored to the
opposite side of the plotting area. If True, the axis lines are
mirrored. If "ticks", the axis lines and ticks are mirrored. If
        False, mirroring is disabled. If "all", axis lines are mirrored
on all shared-axes subplots. If "allticks", axis lines and
ticks are mirrored on all shared-axes subplots.
The 'mirror' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, 'ticks', False, 'all', 'allticks']
Returns
-------
Any
"""
return self["mirror"]
@mirror.setter
def mirror(self, val):
self["mirror"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
        The 'nticks' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# range
# -----
@property
def range(self):
"""
Sets the range of this axis. If the axis `type` is "log", then
you must take the log of your desired range (e.g. to set the
range from 1 to 100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings, like date data,
though Date objects and unix milliseconds will be accepted and
converted to strings. If the axis `type` is "category", it
should be numbers, using the scale where each category is
assigned a serial number from zero in the order it appears.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
# rangemode
# ---------
@property
def rangemode(self):
"""
If "normal", the range is computed in relation to the extrema
of the input data. If "tozero", the range extends to 0,
regardless of the input data. If "nonnegative", the range is
non-negative, regardless of the input data. Applies only to
linear axes.
The 'rangemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'tozero', 'nonnegative']
Returns
-------
Any
"""
return self["rangemode"]
@rangemode.setter
def rangemode(self, val):
self["rangemode"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showaxeslabels
# --------------
@property
def showaxeslabels(self):
"""
Sets whether or not this axis is labeled
The 'showaxeslabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showaxeslabels"]
@showaxeslabels.setter
def showaxeslabels(self, val):
self["showaxeslabels"] = val
# showbackground
# --------------
@property
def showbackground(self):
"""
Sets whether or not this axis' wall has a background color.
The 'showbackground' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showbackground"]
@showbackground.setter
def showbackground(self, val):
self["showbackground"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showgrid
# --------
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
# showline
# --------
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
# showspikes
# ----------
@property
def showspikes(self):
"""
Sets whether or not spikes starting from data points to this
axis' wall are shown on hover.
The 'showspikes' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showspikes"]
@showspikes.setter
def showspikes(self, val):
self["showspikes"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# spikecolor
# ----------
@property
def spikecolor(self):
"""
Sets the color of the spikes.
The 'spikecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["spikecolor"]
@spikecolor.setter
def spikecolor(self, val):
self["spikecolor"] = val
# spikesides
# ----------
@property
def spikesides(self):
"""
Sets whether or not spikes extending from the projection data
points to this axis' wall boundaries are shown on hover.
The 'spikesides' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["spikesides"]
@spikesides.setter
def spikesides(self, val):
self["spikesides"] = val
# spikethickness
# --------------
@property
def spikethickness(self):
"""
Sets the thickness (in px) of the spikes.
The 'spikethickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["spikethickness"]
@spikethickness.setter
def spikethickness(self, val):
self["spikethickness"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is an angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.zaxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.zaxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.scene.zaxis.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min" and "max" are
dtick values describing some zoom level; it is
possible to omit the "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for the described zoom
level; the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.layout.scene.zaxis.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.layout.scene.zaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.scene.zaxis.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.zaxis.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.scene.zaxis.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' ticks
are drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the tick positions set via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.zaxis.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
text
Sets the title of this axis. Note that before
the existence of `title.text`, the title's
contents used to be defined as the `title`
attribute itself. This behavior has been
deprecated.
Returns
-------
plotly.graph_objs.layout.scene.zaxis.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use layout.scene.zaxis.title.font instead.
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.zaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# type
# ----
@property
def type(self):
"""
Sets the axis type. By default, plotly attempts to determine
the axis type by looking into the data of the traces that
reference the axis in question.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['-', 'linear', 'log', 'date', 'category']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# visible
# -------
@property
def visible(self):
"""
A single toggle to hide the axis while preserving interaction
like dragging. Default is true when a cheater plot is present
on the axis, otherwise false
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# zeroline
# --------
@property
def zeroline(self):
"""
Determines whether or not a line is drawn along the 0 value
of this axis. If True, the zero line is drawn on top of the
grid lines.
The 'zeroline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zeroline"]
@zeroline.setter
def zeroline(self, val):
self["zeroline"] = val
# zerolinecolor
# -------------
@property
def zerolinecolor(self):
"""
Sets the line color of the zero line.
The 'zerolinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["zerolinecolor"]
@zerolinecolor.setter
def zerolinecolor(self, val):
self["zerolinecolor"] = val
# zerolinewidth
# -------------
@property
def zerolinewidth(self):
"""
Sets the width (in px) of the zero line.
The 'zerolinewidth' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zerolinewidth"]
@zerolinewidth.setter
def zerolinewidth(self, val):
self["zerolinewidth"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.scene"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis; that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
categoryarray.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background. Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines and/or ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disabled. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data. If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.za
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.zaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.zaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the tick positions set via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.zaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.zaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determine the axis type by looking into the data of
the traces that reference the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
"""
_mapped_properties = {"titlefont": ("title", "font")}
def __init__(
self,
arg=None,
autorange=None,
backgroundcolor=None,
calendar=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
dtick=None,
exponentformat=None,
gridcolor=None,
gridwidth=None,
hoverformat=None,
linecolor=None,
linewidth=None,
mirror=None,
nticks=None,
range=None,
rangemode=None,
separatethousands=None,
showaxeslabels=None,
showbackground=None,
showexponent=None,
showgrid=None,
showline=None,
showspikes=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
spikecolor=None,
spikesides=None,
spikethickness=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
type=None,
visible=None,
zeroline=None,
zerolinecolor=None,
zerolinewidth=None,
**kwargs
):
"""
Construct a new ZAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.ZAxis`
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis; that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
categoryarray.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background. Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines and/or ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disabled. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data. If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.za
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.zaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.zaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the tick positions set via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.zaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.zaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determine the axis type by looking into the data of
the traces that reference the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
ZAxis
"""
super(ZAxis, self).__init__("zaxis")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.ZAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.ZAxis`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.scene import zaxis as v_zaxis
# Initialize validators
# ---------------------
self._validators["autorange"] = v_zaxis.AutorangeValidator()
self._validators["backgroundcolor"] = v_zaxis.BackgroundcolorValidator()
self._validators["calendar"] = v_zaxis.CalendarValidator()
self._validators["categoryarray"] = v_zaxis.CategoryarrayValidator()
self._validators["categoryarraysrc"] = v_zaxis.CategoryarraysrcValidator()
self._validators["categoryorder"] = v_zaxis.CategoryorderValidator()
self._validators["color"] = v_zaxis.ColorValidator()
self._validators["dtick"] = v_zaxis.DtickValidator()
self._validators["exponentformat"] = v_zaxis.ExponentformatValidator()
self._validators["gridcolor"] = v_zaxis.GridcolorValidator()
self._validators["gridwidth"] = v_zaxis.GridwidthValidator()
self._validators["hoverformat"] = v_zaxis.HoverformatValidator()
self._validators["linecolor"] = v_zaxis.LinecolorValidator()
self._validators["linewidth"] = v_zaxis.LinewidthValidator()
self._validators["mirror"] = v_zaxis.MirrorValidator()
self._validators["nticks"] = v_zaxis.NticksValidator()
self._validators["range"] = v_zaxis.RangeValidator()
self._validators["rangemode"] = v_zaxis.RangemodeValidator()
self._validators["separatethousands"] = v_zaxis.SeparatethousandsValidator()
self._validators["showaxeslabels"] = v_zaxis.ShowaxeslabelsValidator()
self._validators["showbackground"] = v_zaxis.ShowbackgroundValidator()
self._validators["showexponent"] = v_zaxis.ShowexponentValidator()
self._validators["showgrid"] = v_zaxis.ShowgridValidator()
self._validators["showline"] = v_zaxis.ShowlineValidator()
self._validators["showspikes"] = v_zaxis.ShowspikesValidator()
self._validators["showticklabels"] = v_zaxis.ShowticklabelsValidator()
self._validators["showtickprefix"] = v_zaxis.ShowtickprefixValidator()
self._validators["showticksuffix"] = v_zaxis.ShowticksuffixValidator()
self._validators["spikecolor"] = v_zaxis.SpikecolorValidator()
self._validators["spikesides"] = v_zaxis.SpikesidesValidator()
self._validators["spikethickness"] = v_zaxis.SpikethicknessValidator()
self._validators["tick0"] = v_zaxis.Tick0Validator()
self._validators["tickangle"] = v_zaxis.TickangleValidator()
self._validators["tickcolor"] = v_zaxis.TickcolorValidator()
self._validators["tickfont"] = v_zaxis.TickfontValidator()
self._validators["tickformat"] = v_zaxis.TickformatValidator()
self._validators["tickformatstops"] = v_zaxis.TickformatstopsValidator()
self._validators["tickformatstopdefaults"] = v_zaxis.TickformatstopValidator()
self._validators["ticklen"] = v_zaxis.TicklenValidator()
self._validators["tickmode"] = v_zaxis.TickmodeValidator()
self._validators["tickprefix"] = v_zaxis.TickprefixValidator()
self._validators["ticks"] = v_zaxis.TicksValidator()
self._validators["ticksuffix"] = v_zaxis.TicksuffixValidator()
self._validators["ticktext"] = v_zaxis.TicktextValidator()
self._validators["ticktextsrc"] = v_zaxis.TicktextsrcValidator()
self._validators["tickvals"] = v_zaxis.TickvalsValidator()
self._validators["tickvalssrc"] = v_zaxis.TickvalssrcValidator()
self._validators["tickwidth"] = v_zaxis.TickwidthValidator()
self._validators["title"] = v_zaxis.TitleValidator()
self._validators["type"] = v_zaxis.TypeValidator()
self._validators["visible"] = v_zaxis.VisibleValidator()
self._validators["zeroline"] = v_zaxis.ZerolineValidator()
self._validators["zerolinecolor"] = v_zaxis.ZerolinecolorValidator()
self._validators["zerolinewidth"] = v_zaxis.ZerolinewidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autorange", None)
self["autorange"] = autorange if autorange is not None else _v
_v = arg.pop("backgroundcolor", None)
self["backgroundcolor"] = backgroundcolor if backgroundcolor is not None else _v
_v = arg.pop("calendar", None)
self["calendar"] = calendar if calendar is not None else _v
_v = arg.pop("categoryarray", None)
self["categoryarray"] = categoryarray if categoryarray is not None else _v
_v = arg.pop("categoryarraysrc", None)
self["categoryarraysrc"] = (
categoryarraysrc if categoryarraysrc is not None else _v
)
_v = arg.pop("categoryorder", None)
self["categoryorder"] = categoryorder if categoryorder is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("dtick", None)
self["dtick"] = dtick if dtick is not None else _v
_v = arg.pop("exponentformat", None)
self["exponentformat"] = exponentformat if exponentformat is not None else _v
_v = arg.pop("gridcolor", None)
self["gridcolor"] = gridcolor if gridcolor is not None else _v
_v = arg.pop("gridwidth", None)
self["gridwidth"] = gridwidth if gridwidth is not None else _v
_v = arg.pop("hoverformat", None)
self["hoverformat"] = hoverformat if hoverformat is not None else _v
_v = arg.pop("linecolor", None)
self["linecolor"] = linecolor if linecolor is not None else _v
_v = arg.pop("linewidth", None)
self["linewidth"] = linewidth if linewidth is not None else _v
_v = arg.pop("mirror", None)
self["mirror"] = mirror if mirror is not None else _v
_v = arg.pop("nticks", None)
self["nticks"] = nticks if nticks is not None else _v
_v = arg.pop("range", None)
self["range"] = range if range is not None else _v
_v = arg.pop("rangemode", None)
self["rangemode"] = rangemode if rangemode is not None else _v
_v = arg.pop("separatethousands", None)
self["separatethousands"] = (
separatethousands if separatethousands is not None else _v
)
_v = arg.pop("showaxeslabels", None)
self["showaxeslabels"] = showaxeslabels if showaxeslabels is not None else _v
_v = arg.pop("showbackground", None)
self["showbackground"] = showbackground if showbackground is not None else _v
_v = arg.pop("showexponent", None)
self["showexponent"] = showexponent if showexponent is not None else _v
_v = arg.pop("showgrid", None)
self["showgrid"] = showgrid if showgrid is not None else _v
_v = arg.pop("showline", None)
self["showline"] = showline if showline is not None else _v
_v = arg.pop("showspikes", None)
self["showspikes"] = showspikes if showspikes is not None else _v
_v = arg.pop("showticklabels", None)
self["showticklabels"] = showticklabels if showticklabels is not None else _v
_v = arg.pop("showtickprefix", None)
self["showtickprefix"] = showtickprefix if showtickprefix is not None else _v
_v = arg.pop("showticksuffix", None)
self["showticksuffix"] = showticksuffix if showticksuffix is not None else _v
_v = arg.pop("spikecolor", None)
self["spikecolor"] = spikecolor if spikecolor is not None else _v
_v = arg.pop("spikesides", None)
self["spikesides"] = spikesides if spikesides is not None else _v
_v = arg.pop("spikethickness", None)
self["spikethickness"] = spikethickness if spikethickness is not None else _v
_v = arg.pop("tick0", None)
self["tick0"] = tick0 if tick0 is not None else _v
_v = arg.pop("tickangle", None)
self["tickangle"] = tickangle if tickangle is not None else _v
_v = arg.pop("tickcolor", None)
self["tickcolor"] = tickcolor if tickcolor is not None else _v
_v = arg.pop("tickfont", None)
self["tickfont"] = tickfont if tickfont is not None else _v
_v = arg.pop("tickformat", None)
self["tickformat"] = tickformat if tickformat is not None else _v
_v = arg.pop("tickformatstops", None)
self["tickformatstops"] = tickformatstops if tickformatstops is not None else _v
_v = arg.pop("tickformatstopdefaults", None)
self["tickformatstopdefaults"] = (
tickformatstopdefaults if tickformatstopdefaults is not None else _v
)
_v = arg.pop("ticklen", None)
self["ticklen"] = ticklen if ticklen is not None else _v
_v = arg.pop("tickmode", None)
self["tickmode"] = tickmode if tickmode is not None else _v
_v = arg.pop("tickprefix", None)
self["tickprefix"] = tickprefix if tickprefix is not None else _v
_v = arg.pop("ticks", None)
self["ticks"] = ticks if ticks is not None else _v
_v = arg.pop("ticksuffix", None)
self["ticksuffix"] = ticksuffix if ticksuffix is not None else _v
_v = arg.pop("ticktext", None)
self["ticktext"] = ticktext if ticktext is not None else _v
_v = arg.pop("ticktextsrc", None)
self["ticktextsrc"] = ticktextsrc if ticktextsrc is not None else _v
_v = arg.pop("tickvals", None)
self["tickvals"] = tickvals if tickvals is not None else _v
_v = arg.pop("tickvalssrc", None)
self["tickvalssrc"] = tickvalssrc if tickvalssrc is not None else _v
_v = arg.pop("tickwidth", None)
self["tickwidth"] = tickwidth if tickwidth is not None else _v
_v = arg.pop("title", None)
self["title"] = title if title is not None else _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("type", None)
self["type"] = type if type is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("zeroline", None)
self["zeroline"] = zeroline if zeroline is not None else _v
_v = arg.pop("zerolinecolor", None)
self["zerolinecolor"] = zerolinecolor if zerolinecolor is not None else _v
_v = arg.pop("zerolinewidth", None)
self["zerolinewidth"] = zerolinewidth if zerolinewidth is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class YAxis(_BaseLayoutHierarchyType):
# autorange
# ---------
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided, then `autorange` is set to False.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed']
Returns
-------
Any
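For example, a minimal illustrative sketch (assuming
`plotly.graph_objects` is importable):
>>> import plotly.graph_objects as go
>>> axis = go.layout.scene.YAxis(autorange="reversed")  # flip direction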
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
# backgroundcolor
# ---------------
@property
def backgroundcolor(self):
"""
Sets the background color of this axis' wall.
The 'backgroundcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["backgroundcolor"]
@backgroundcolor.setter
def backgroundcolor(self, val):
self["backgroundcolor"] = val
# calendar
# --------
@property
def calendar(self):
"""
Sets the calendar system to use for `range` and `tick0` if this
is a date axis. This does not set the calendar for interpreting
data on this axis; that's specified in the trace or via the
global `layout.calendar`.
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
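For example, an illustrative sketch of a date axis whose ticks
use the Thai calendar:
>>> import plotly.graph_objects as go
>>> axis = go.layout.scene.YAxis(type="date", calendar="thai")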
"""
return self["calendar"]
@calendar.setter
def calendar(self, val):
self["calendar"] = val
# categoryarray
# -------------
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
# categoryarraysrc
# ----------------
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
categoryarray.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
# categoryorder
# -------------
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean or median of all the
values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
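For example, an illustrative sketch of explicit "array" ordering:
>>> import plotly.graph_objects as go
>>> axis = go.layout.scene.YAxis(
...     type="category",
...     categoryorder="array",
...     categoryarray=["low", "mid", "high"],
... )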
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
# color
# -----
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background. Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
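Illustrative sketches of the special values described above:
>>> import plotly.graph_objects as go
>>> log_axis = go.layout.scene.YAxis(type="log", dtick=2)  # 1, 100, 10000, ...
>>> date_axis = go.layout.scene.YAxis(
...     type="date", tick0="2000-01-15", dtick="M3"
... )  # a tick on the 15th of every third month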
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
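For example, an illustrative sketch:
>>> import plotly.graph_objects as go
>>> axis = go.layout.scene.YAxis(exponentformat="SI")  # 1e9 shown as 1G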
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# gridcolor
# ---------
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
# gridwidth
# ---------
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
# hoverformat
# -----------
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
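For example, an illustrative sketch of a two-decimal hover label:
>>> import plotly.graph_objects as go
>>> axis = go.layout.scene.YAxis(hoverformat=".2f")  # 3.14159 -> "3.14"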
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
# linecolor
# ---------
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
# linewidth
# ---------
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
# mirror
# ------
@property
def mirror(self):
"""
Determines if the axis lines or/and ticks are mirrored to the
opposite side of the plotting area. If True, the axis lines are
mirrored. If "ticks", the axis lines and ticks are mirrored. If
False, mirroring is disabled. If "all", axis lines are mirrored
on all shared-axes subplots. If "allticks", axis lines and
ticks are mirrored on all shared-axes subplots.
The 'mirror' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, 'ticks', False, 'all', 'allticks']
Returns
-------
Any
"""
return self["mirror"]
@mirror.setter
def mirror(self, val):
self["mirror"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# range
# -----
@property
def range(self):
"""
Sets the range of this axis. If the axis `type` is "log", then
you must take the log of your desired range (e.g. to set the
range from 1 to 100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings, like date data,
though Date objects and unix milliseconds will be accepted and
converted to strings. If the axis `type` is "category", it
should be numbers, using the scale where each category is
assigned a serial number from zero in the order it appears.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
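Illustrative sketches (note the log-axis endpoints are log10
values, as described above):
>>> import plotly.graph_objects as go
>>> linear_axis = go.layout.scene.YAxis(range=[0, 10])
>>> log_axis = go.layout.scene.YAxis(type="log", range=[0, 2])  # 1 to 100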
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
# rangemode
# ---------
@property
def rangemode(self):
"""
If "normal", the range is computed in relation to the extrema
of the input data. If "tozero", the range extends to 0,
regardless of the input data. If "nonnegative", the range is
non-negative, regardless of the input data. Applies only to
linear axes.
The 'rangemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'tozero', 'nonnegative']
Returns
-------
Any
"""
return self["rangemode"]
@rangemode.setter
def rangemode(self, val):
self["rangemode"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showaxeslabels
# --------------
@property
def showaxeslabels(self):
"""
Sets whether or not this axis is labeled
The 'showaxeslabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showaxeslabels"]
@showaxeslabels.setter
def showaxeslabels(self, val):
self["showaxeslabels"] = val
# showbackground
# --------------
@property
def showbackground(self):
"""
Sets whether or not this axis' wall has a background color.
The 'showbackground' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showbackground"]
@showbackground.setter
def showbackground(self, val):
self["showbackground"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showgrid
# --------
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
# showline
# --------
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
# showspikes
# ----------
@property
def showspikes(self):
"""
Sets whether or not spikes starting from data points to this
axis' wall are shown on hover.
The 'showspikes' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showspikes"]
@showspikes.setter
def showspikes(self, val):
self["showspikes"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# spikecolor
# ----------
@property
def spikecolor(self):
"""
Sets the color of the spikes.
The 'spikecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["spikecolor"]
@spikecolor.setter
def spikecolor(self, val):
self["spikecolor"] = val
# spikesides
# ----------
@property
def spikesides(self):
"""
Sets whether or not spikes extending from the projection data
points to this axis' wall boundaries are shown on hover.
The 'spikesides' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["spikesides"]
@spikesides.setter
def spikesides(self, val):
self["spikesides"] = val
# spikethickness
# --------------
@property
def spikethickness(self):
"""
Sets the thickness (in px) of the spikes.
The 'spikethickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["spikethickness"]
@spikethickness.setter
def spikethickness(self, val):
self["spikethickness"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is an angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.yaxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.scene.yaxis.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min" and "max" are
dtick values describing a zoom level; either may
be omitted by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.layout.scene.yaxis.Tickformatstop]
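For example, an illustrative sketch with two hypothetical zoom
levels (finer than one month, and coarser):
>>> import plotly.graph_objects as go
>>> axis = go.layout.scene.YAxis(
...     tickformatstops=[
...         dict(dtickrange=[None, "M1"], value="%b %d"),
...         dict(dtickrange=["M1", None], value="%b '%y"),
...     ]
... )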
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.layout.scene.yaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.scene.yaxis.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.scene.yaxis.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
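For example, an illustrative sketch of explicit "array" ticks:
>>> import plotly.graph_objects as go
>>> axis = go.layout.scene.YAxis(
...     tickmode="array",
...     tickvals=[0, 0.5, 1],
...     ticktext=["low", "mid", "high"],
... )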
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the tick positions given by `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
text
Sets the title of this axis. Note that before
the existence of `title.text`, the title's
contents used to be defined as the `title`
attribute itself. This behavior has been
deprecated.
Returns
-------
plotly.graph_objs.layout.scene.yaxis.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use layout.scene.yaxis.title.font instead.
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
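Migration sketch (illustrative): instead of the deprecated
`titlefont=dict(size=14)`, prefer the nested title font:
>>> import plotly.graph_objects as go
>>> axis = go.layout.scene.YAxis(
...     title=dict(text="y", font=dict(size=14))
... )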
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# type
# ----
@property
def type(self):
"""
Sets the axis type. By default, plotly attempts to determine
the axis type by looking into the data of the traces that
referenced the axis in question.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['-', 'linear', 'log', 'date', 'category']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# visible
# -------
@property
def visible(self):
"""
A single toggle to hide the axis while preserving interaction
like dragging. Default is true when a cheater plot is present
on the axis, otherwise false
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# zeroline
# --------
@property
def zeroline(self):
"""
Determines whether or not a line is drawn along the 0 value
of this axis. If True, the zero line is drawn on top of the
grid lines.
The 'zeroline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zeroline"]
@zeroline.setter
def zeroline(self, val):
self["zeroline"] = val
# zerolinecolor
# -------------
@property
def zerolinecolor(self):
"""
Sets the line color of the zero line.
The 'zerolinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["zerolinecolor"]
@zerolinecolor.setter
def zerolinecolor(self, val):
self["zerolinecolor"] = val
# zerolinewidth
# -------------
@property
def zerolinewidth(self):
"""
Sets the width (in px) of the zero line.
The 'zerolinewidth' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zerolinewidth"]
@zerolinewidth.setter
def zerolinewidth(self, val):
self["zerolinewidth"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.scene"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis; that's specified in
the trace or via the global `layout.calendar`.
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
categoryarray.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background. Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disabled. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data. If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of
:class:`plotly.graph_objects.layout.scene.yaxis.Tickformatstop`
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as
layout.template.layout.scene.yaxis.tickformatstopdefaults),
sets the default
property values to use for elements of
layout.scene.yaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the tick positions given by
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.yaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.yaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determine the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
"""
_mapped_properties = {"titlefont": ("title", "font")}
def __init__(
self,
arg=None,
autorange=None,
backgroundcolor=None,
calendar=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
dtick=None,
exponentformat=None,
gridcolor=None,
gridwidth=None,
hoverformat=None,
linecolor=None,
linewidth=None,
mirror=None,
nticks=None,
range=None,
rangemode=None,
separatethousands=None,
showaxeslabels=None,
showbackground=None,
showexponent=None,
showgrid=None,
showline=None,
showspikes=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
spikecolor=None,
spikesides=None,
spikethickness=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
type=None,
visible=None,
zeroline=None,
zerolinecolor=None,
zerolinewidth=None,
**kwargs
):
"""
Construct a new YAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.YAxis`
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis; that's specified in
the trace or via the global `layout.calendar`.
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
categoryarray.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background. Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48".
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disabled. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data. If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.ya
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.yaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.yaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the tick positions given by
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.yaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.yaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determine the axis type by looking at the data of
the traces that reference the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false.
zeroline
Determines whether or not a line is drawn along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
YAxis
"""
super(YAxis, self).__init__("yaxis")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.YAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.YAxis`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.scene import yaxis as v_yaxis
# Initialize validators
# ---------------------
self._validators["autorange"] = v_yaxis.AutorangeValidator()
self._validators["backgroundcolor"] = v_yaxis.BackgroundcolorValidator()
self._validators["calendar"] = v_yaxis.CalendarValidator()
self._validators["categoryarray"] = v_yaxis.CategoryarrayValidator()
self._validators["categoryarraysrc"] = v_yaxis.CategoryarraysrcValidator()
self._validators["categoryorder"] = v_yaxis.CategoryorderValidator()
self._validators["color"] = v_yaxis.ColorValidator()
self._validators["dtick"] = v_yaxis.DtickValidator()
self._validators["exponentformat"] = v_yaxis.ExponentformatValidator()
self._validators["gridcolor"] = v_yaxis.GridcolorValidator()
self._validators["gridwidth"] = v_yaxis.GridwidthValidator()
self._validators["hoverformat"] = v_yaxis.HoverformatValidator()
self._validators["linecolor"] = v_yaxis.LinecolorValidator()
self._validators["linewidth"] = v_yaxis.LinewidthValidator()
self._validators["mirror"] = v_yaxis.MirrorValidator()
self._validators["nticks"] = v_yaxis.NticksValidator()
self._validators["range"] = v_yaxis.RangeValidator()
self._validators["rangemode"] = v_yaxis.RangemodeValidator()
self._validators["separatethousands"] = v_yaxis.SeparatethousandsValidator()
self._validators["showaxeslabels"] = v_yaxis.ShowaxeslabelsValidator()
self._validators["showbackground"] = v_yaxis.ShowbackgroundValidator()
self._validators["showexponent"] = v_yaxis.ShowexponentValidator()
self._validators["showgrid"] = v_yaxis.ShowgridValidator()
self._validators["showline"] = v_yaxis.ShowlineValidator()
self._validators["showspikes"] = v_yaxis.ShowspikesValidator()
self._validators["showticklabels"] = v_yaxis.ShowticklabelsValidator()
self._validators["showtickprefix"] = v_yaxis.ShowtickprefixValidator()
self._validators["showticksuffix"] = v_yaxis.ShowticksuffixValidator()
self._validators["spikecolor"] = v_yaxis.SpikecolorValidator()
self._validators["spikesides"] = v_yaxis.SpikesidesValidator()
self._validators["spikethickness"] = v_yaxis.SpikethicknessValidator()
self._validators["tick0"] = v_yaxis.Tick0Validator()
self._validators["tickangle"] = v_yaxis.TickangleValidator()
self._validators["tickcolor"] = v_yaxis.TickcolorValidator()
self._validators["tickfont"] = v_yaxis.TickfontValidator()
self._validators["tickformat"] = v_yaxis.TickformatValidator()
self._validators["tickformatstops"] = v_yaxis.TickformatstopsValidator()
self._validators["tickformatstopdefaults"] = v_yaxis.TickformatstopValidator()
self._validators["ticklen"] = v_yaxis.TicklenValidator()
self._validators["tickmode"] = v_yaxis.TickmodeValidator()
self._validators["tickprefix"] = v_yaxis.TickprefixValidator()
self._validators["ticks"] = v_yaxis.TicksValidator()
self._validators["ticksuffix"] = v_yaxis.TicksuffixValidator()
self._validators["ticktext"] = v_yaxis.TicktextValidator()
self._validators["ticktextsrc"] = v_yaxis.TicktextsrcValidator()
self._validators["tickvals"] = v_yaxis.TickvalsValidator()
self._validators["tickvalssrc"] = v_yaxis.TickvalssrcValidator()
self._validators["tickwidth"] = v_yaxis.TickwidthValidator()
self._validators["title"] = v_yaxis.TitleValidator()
self._validators["type"] = v_yaxis.TypeValidator()
self._validators["visible"] = v_yaxis.VisibleValidator()
self._validators["zeroline"] = v_yaxis.ZerolineValidator()
self._validators["zerolinecolor"] = v_yaxis.ZerolinecolorValidator()
self._validators["zerolinewidth"] = v_yaxis.ZerolinewidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autorange", None)
self["autorange"] = autorange if autorange is not None else _v
_v = arg.pop("backgroundcolor", None)
self["backgroundcolor"] = backgroundcolor if backgroundcolor is not None else _v
_v = arg.pop("calendar", None)
self["calendar"] = calendar if calendar is not None else _v
_v = arg.pop("categoryarray", None)
self["categoryarray"] = categoryarray if categoryarray is not None else _v
_v = arg.pop("categoryarraysrc", None)
self["categoryarraysrc"] = (
categoryarraysrc if categoryarraysrc is not None else _v
)
_v = arg.pop("categoryorder", None)
self["categoryorder"] = categoryorder if categoryorder is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("dtick", None)
self["dtick"] = dtick if dtick is not None else _v
_v = arg.pop("exponentformat", None)
self["exponentformat"] = exponentformat if exponentformat is not None else _v
_v = arg.pop("gridcolor", None)
self["gridcolor"] = gridcolor if gridcolor is not None else _v
_v = arg.pop("gridwidth", None)
self["gridwidth"] = gridwidth if gridwidth is not None else _v
_v = arg.pop("hoverformat", None)
self["hoverformat"] = hoverformat if hoverformat is not None else _v
_v = arg.pop("linecolor", None)
self["linecolor"] = linecolor if linecolor is not None else _v
_v = arg.pop("linewidth", None)
self["linewidth"] = linewidth if linewidth is not None else _v
_v = arg.pop("mirror", None)
self["mirror"] = mirror if mirror is not None else _v
_v = arg.pop("nticks", None)
self["nticks"] = nticks if nticks is not None else _v
_v = arg.pop("range", None)
self["range"] = range if range is not None else _v
_v = arg.pop("rangemode", None)
self["rangemode"] = rangemode if rangemode is not None else _v
_v = arg.pop("separatethousands", None)
self["separatethousands"] = (
separatethousands if separatethousands is not None else _v
)
_v = arg.pop("showaxeslabels", None)
self["showaxeslabels"] = showaxeslabels if showaxeslabels is not None else _v
_v = arg.pop("showbackground", None)
self["showbackground"] = showbackground if showbackground is not None else _v
_v = arg.pop("showexponent", None)
self["showexponent"] = showexponent if showexponent is not None else _v
_v = arg.pop("showgrid", None)
self["showgrid"] = showgrid if showgrid is not None else _v
_v = arg.pop("showline", None)
self["showline"] = showline if showline is not None else _v
_v = arg.pop("showspikes", None)
self["showspikes"] = showspikes if showspikes is not None else _v
_v = arg.pop("showticklabels", None)
self["showticklabels"] = showticklabels if showticklabels is not None else _v
_v = arg.pop("showtickprefix", None)
self["showtickprefix"] = showtickprefix if showtickprefix is not None else _v
_v = arg.pop("showticksuffix", None)
self["showticksuffix"] = showticksuffix if showticksuffix is not None else _v
_v = arg.pop("spikecolor", None)
self["spikecolor"] = spikecolor if spikecolor is not None else _v
_v = arg.pop("spikesides", None)
self["spikesides"] = spikesides if spikesides is not None else _v
_v = arg.pop("spikethickness", None)
self["spikethickness"] = spikethickness if spikethickness is not None else _v
_v = arg.pop("tick0", None)
self["tick0"] = tick0 if tick0 is not None else _v
_v = arg.pop("tickangle", None)
self["tickangle"] = tickangle if tickangle is not None else _v
_v = arg.pop("tickcolor", None)
self["tickcolor"] = tickcolor if tickcolor is not None else _v
_v = arg.pop("tickfont", None)
self["tickfont"] = tickfont if tickfont is not None else _v
_v = arg.pop("tickformat", None)
self["tickformat"] = tickformat if tickformat is not None else _v
_v = arg.pop("tickformatstops", None)
self["tickformatstops"] = tickformatstops if tickformatstops is not None else _v
_v = arg.pop("tickformatstopdefaults", None)
self["tickformatstopdefaults"] = (
tickformatstopdefaults if tickformatstopdefaults is not None else _v
)
_v = arg.pop("ticklen", None)
self["ticklen"] = ticklen if ticklen is not None else _v
_v = arg.pop("tickmode", None)
self["tickmode"] = tickmode if tickmode is not None else _v
_v = arg.pop("tickprefix", None)
self["tickprefix"] = tickprefix if tickprefix is not None else _v
_v = arg.pop("ticks", None)
self["ticks"] = ticks if ticks is not None else _v
_v = arg.pop("ticksuffix", None)
self["ticksuffix"] = ticksuffix if ticksuffix is not None else _v
_v = arg.pop("ticktext", None)
self["ticktext"] = ticktext if ticktext is not None else _v
_v = arg.pop("ticktextsrc", None)
self["ticktextsrc"] = ticktextsrc if ticktextsrc is not None else _v
_v = arg.pop("tickvals", None)
self["tickvals"] = tickvals if tickvals is not None else _v
_v = arg.pop("tickvalssrc", None)
self["tickvalssrc"] = tickvalssrc if tickvalssrc is not None else _v
_v = arg.pop("tickwidth", None)
self["tickwidth"] = tickwidth if tickwidth is not None else _v
_v = arg.pop("title", None)
self["title"] = title if title is not None else _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("type", None)
self["type"] = type if type is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("zeroline", None)
self["zeroline"] = zeroline if zeroline is not None else _v
_v = arg.pop("zerolinecolor", None)
self["zerolinecolor"] = zerolinecolor if zerolinecolor is not None else _v
_v = arg.pop("zerolinewidth", None)
self["zerolinewidth"] = zerolinewidth if zerolinewidth is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
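# Usage sketch (illustrative only, not part of the generated module): a
# scene YAxis accepts keyword arguments or a compatible dict, e.g.
#
#     import plotly.graph_objects as go
#     fig = go.Figure(
#         data=[go.Scatter3d(x=[1, 2], y=[1, 4], z=[2, 3])],
#         layout=go.Layout(
#             scene=dict(yaxis=go.layout.scene.YAxis(title=dict(text="y")))
#         ),
#     )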
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class XAxis(_BaseLayoutHierarchyType):
# autorange
# ---------
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided, then `autorange` is set to False.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed']
Returns
-------
Any
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
# backgroundcolor
# ---------------
@property
def backgroundcolor(self):
"""
Sets the background color of this axis' wall.
The 'backgroundcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["backgroundcolor"]
@backgroundcolor.setter
def backgroundcolor(self, val):
self["backgroundcolor"] = val
# calendar
# --------
@property
def calendar(self):
"""
Sets the calendar system to use for `range` and `tick0` if this
is a date axis. This does not set the calendar for interpreting
data on this axis; that's specified in the trace or via the
global `layout.calendar`.
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["calendar"]
@calendar.setter
def calendar(self, val):
self["calendar"] = val
# categoryarray
# -------------
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
# categoryarraysrc
# ----------------
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
# categoryorder
# -------------
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean or median of all the
values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
# color
# -----
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background. Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# gridcolor
# ---------
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
# gridwidth
# ---------
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
# hoverformat
# -----------
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
# linecolor
# ---------
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
# linewidth
# ---------
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
# mirror
# ------
@property
def mirror(self):
"""
Determines if the axis lines or/and ticks are mirrored to the
opposite side of the plotting area. If True, the axis lines are
mirrored. If "ticks", the axis lines and ticks are mirrored. If
False, mirroring is disabled. If "all", axis lines are mirrored
on all shared-axes subplots. If "allticks", axis lines and
ticks are mirrored on all shared-axes subplots.
The 'mirror' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, 'ticks', False, 'all', 'allticks']
Returns
-------
Any
"""
return self["mirror"]
@mirror.setter
def mirror(self, val):
self["mirror"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# range
# -----
@property
def range(self):
"""
Sets the range of this axis. If the axis `type` is "log", then
you must take the log of your desired range (e.g. to set the
range from 1 to 100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings, like date data,
though Date objects and unix milliseconds will be accepted and
converted to strings. If the axis `type` is "category", it
should be numbers, using the scale where each category is
assigned a serial number from zero in the order it appears.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
# rangemode
# ---------
@property
def rangemode(self):
"""
If "normal", the range is computed in relation to the extrema
of the input data. If "tozero", the range extends to 0,
regardless of the input data. If "nonnegative", the range is
non-negative, regardless of the input data. Applies only to
linear axes.
The 'rangemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'tozero', 'nonnegative']
Returns
-------
Any
"""
return self["rangemode"]
@rangemode.setter
def rangemode(self, val):
self["rangemode"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showaxeslabels
# --------------
@property
def showaxeslabels(self):
"""
Sets whether or not this axis is labeled
The 'showaxeslabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showaxeslabels"]
@showaxeslabels.setter
def showaxeslabels(self, val):
self["showaxeslabels"] = val
# showbackground
# --------------
@property
def showbackground(self):
"""
Sets whether or not this axis' wall has a background color.
The 'showbackground' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showbackground"]
@showbackground.setter
def showbackground(self, val):
self["showbackground"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showgrid
# --------
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
# showline
# --------
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
# showspikes
# ----------
@property
def showspikes(self):
"""
Sets whether or not spikes starting from data points to this
axis' wall are shown on hover.
The 'showspikes' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showspikes"]
@showspikes.setter
def showspikes(self, val):
self["showspikes"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# spikecolor
# ----------
@property
def spikecolor(self):
"""
Sets the color of the spikes.
The 'spikecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["spikecolor"]
@spikecolor.setter
def spikecolor(self, val):
self["spikecolor"] = val
# spikesides
# ----------
@property
def spikesides(self):
"""
Sets whether or not spikes extending from the projection data
points to this axis' wall boundaries are shown on hover.
The 'spikesides' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["spikesides"]
@spikesides.setter
def spikesides(self, val):
self["spikesides"] = val
# spikethickness
# --------------
@property
def spikethickness(self):
"""
Sets the thickness (in px) of the spikes.
The 'spikethickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["spikethickness"]
@spikethickness.setter
def spikethickness(self, val):
self["spikethickness"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is an angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.xaxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.xaxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.scene.xaxis.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min" and "max" are
dtick values describing a zoom level; it is
possible to omit the "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - the dtickformat for the described zoom
level; works the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.layout.scene.xaxis.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.layout.scene.xaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.scene.xaxis.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.xaxis.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.scene.xaxis.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the tick positions given by `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.xaxis.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
text
Sets the title of this axis. Note that before
the existence of `title.text`, the title's
contents used to be defined as the `title`
attribute itself. This behavior has been
deprecated.
Returns
-------
plotly.graph_objs.layout.scene.xaxis.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use layout.scene.xaxis.title.font instead.
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.xaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.xaxis.title.Font
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# type
# ----
@property
def type(self):
"""
Sets the axis type. By default, plotly attempts to determine
the axis type by looking at the data of the traces that
reference the axis in question.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['-', 'linear', 'log', 'date', 'category']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# visible
# -------
@property
def visible(self):
"""
A single toggle to hide the axis while preserving interaction
like dragging. Default is true when a cheater plot is present
on the axis, otherwise false
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# zeroline
# --------
@property
def zeroline(self):
"""
Determines whether or not a line is drawn along the 0 value
of this axis. If True, the zero line is drawn on top of the
grid lines.
The 'zeroline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zeroline"]
@zeroline.setter
def zeroline(self, val):
self["zeroline"] = val
# zerolinecolor
# -------------
@property
def zerolinecolor(self):
"""
Sets the line color of the zero line.
The 'zerolinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["zerolinecolor"]
@zerolinecolor.setter
def zerolinecolor(self, val):
self["zerolinecolor"] = val
# zerolinewidth
# -------------
@property
def zerolinewidth(self):
"""
Sets the width (in px) of the zero line.
The 'zerolinewidth' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zerolinewidth"]
@zerolinewidth.setter
def zerolinewidth(self, val):
self["zerolinewidth"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.scene"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis; that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background. Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disabled. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data. If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of
:class:`plotly.graph_objects.layout.scene.xaxis.Tickformatstop`
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as
layout.template.layout.scene.xaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.scene.xaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.xaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.xaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determine the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
"""
_mapped_properties = {"titlefont": ("title", "font")}
def __init__(
self,
arg=None,
autorange=None,
backgroundcolor=None,
calendar=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
dtick=None,
exponentformat=None,
gridcolor=None,
gridwidth=None,
hoverformat=None,
linecolor=None,
linewidth=None,
mirror=None,
nticks=None,
range=None,
rangemode=None,
separatethousands=None,
showaxeslabels=None,
showbackground=None,
showexponent=None,
showgrid=None,
showline=None,
showspikes=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
spikecolor=None,
spikesides=None,
spikethickness=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
type=None,
visible=None,
zeroline=None,
zerolinecolor=None,
zerolinewidth=None,
**kwargs
):
"""
Construct a new XAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.XAxis`
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis; that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background. Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disabled. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data. If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of
:class:`plotly.graph_objects.layout.scene.xaxis.Tickformatstop`
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as
layout.template.layout.scene.xaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.scene.xaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.xaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.xaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determine the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
XAxis
"""
super(XAxis, self).__init__("xaxis")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.XAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.XAxis`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.scene import xaxis as v_xaxis
# Initialize validators
# ---------------------
self._validators["autorange"] = v_xaxis.AutorangeValidator()
self._validators["backgroundcolor"] = v_xaxis.BackgroundcolorValidator()
self._validators["calendar"] = v_xaxis.CalendarValidator()
self._validators["categoryarray"] = v_xaxis.CategoryarrayValidator()
self._validators["categoryarraysrc"] = v_xaxis.CategoryarraysrcValidator()
self._validators["categoryorder"] = v_xaxis.CategoryorderValidator()
self._validators["color"] = v_xaxis.ColorValidator()
self._validators["dtick"] = v_xaxis.DtickValidator()
self._validators["exponentformat"] = v_xaxis.ExponentformatValidator()
self._validators["gridcolor"] = v_xaxis.GridcolorValidator()
self._validators["gridwidth"] = v_xaxis.GridwidthValidator()
self._validators["hoverformat"] = v_xaxis.HoverformatValidator()
self._validators["linecolor"] = v_xaxis.LinecolorValidator()
self._validators["linewidth"] = v_xaxis.LinewidthValidator()
self._validators["mirror"] = v_xaxis.MirrorValidator()
self._validators["nticks"] = v_xaxis.NticksValidator()
self._validators["range"] = v_xaxis.RangeValidator()
self._validators["rangemode"] = v_xaxis.RangemodeValidator()
self._validators["separatethousands"] = v_xaxis.SeparatethousandsValidator()
self._validators["showaxeslabels"] = v_xaxis.ShowaxeslabelsValidator()
self._validators["showbackground"] = v_xaxis.ShowbackgroundValidator()
self._validators["showexponent"] = v_xaxis.ShowexponentValidator()
self._validators["showgrid"] = v_xaxis.ShowgridValidator()
self._validators["showline"] = v_xaxis.ShowlineValidator()
self._validators["showspikes"] = v_xaxis.ShowspikesValidator()
self._validators["showticklabels"] = v_xaxis.ShowticklabelsValidator()
self._validators["showtickprefix"] = v_xaxis.ShowtickprefixValidator()
self._validators["showticksuffix"] = v_xaxis.ShowticksuffixValidator()
self._validators["spikecolor"] = v_xaxis.SpikecolorValidator()
self._validators["spikesides"] = v_xaxis.SpikesidesValidator()
self._validators["spikethickness"] = v_xaxis.SpikethicknessValidator()
self._validators["tick0"] = v_xaxis.Tick0Validator()
self._validators["tickangle"] = v_xaxis.TickangleValidator()
self._validators["tickcolor"] = v_xaxis.TickcolorValidator()
self._validators["tickfont"] = v_xaxis.TickfontValidator()
self._validators["tickformat"] = v_xaxis.TickformatValidator()
self._validators["tickformatstops"] = v_xaxis.TickformatstopsValidator()
self._validators["tickformatstopdefaults"] = v_xaxis.TickformatstopValidator()
self._validators["ticklen"] = v_xaxis.TicklenValidator()
self._validators["tickmode"] = v_xaxis.TickmodeValidator()
self._validators["tickprefix"] = v_xaxis.TickprefixValidator()
self._validators["ticks"] = v_xaxis.TicksValidator()
self._validators["ticksuffix"] = v_xaxis.TicksuffixValidator()
self._validators["ticktext"] = v_xaxis.TicktextValidator()
self._validators["ticktextsrc"] = v_xaxis.TicktextsrcValidator()
self._validators["tickvals"] = v_xaxis.TickvalsValidator()
self._validators["tickvalssrc"] = v_xaxis.TickvalssrcValidator()
self._validators["tickwidth"] = v_xaxis.TickwidthValidator()
self._validators["title"] = v_xaxis.TitleValidator()
self._validators["type"] = v_xaxis.TypeValidator()
self._validators["visible"] = v_xaxis.VisibleValidator()
self._validators["zeroline"] = v_xaxis.ZerolineValidator()
self._validators["zerolinecolor"] = v_xaxis.ZerolinecolorValidator()
self._validators["zerolinewidth"] = v_xaxis.ZerolinewidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autorange", None)
self["autorange"] = autorange if autorange is not None else _v
_v = arg.pop("backgroundcolor", None)
self["backgroundcolor"] = backgroundcolor if backgroundcolor is not None else _v
_v = arg.pop("calendar", None)
self["calendar"] = calendar if calendar is not None else _v
_v = arg.pop("categoryarray", None)
self["categoryarray"] = categoryarray if categoryarray is not None else _v
_v = arg.pop("categoryarraysrc", None)
self["categoryarraysrc"] = (
categoryarraysrc if categoryarraysrc is not None else _v
)
_v = arg.pop("categoryorder", None)
self["categoryorder"] = categoryorder if categoryorder is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("dtick", None)
self["dtick"] = dtick if dtick is not None else _v
_v = arg.pop("exponentformat", None)
self["exponentformat"] = exponentformat if exponentformat is not None else _v
_v = arg.pop("gridcolor", None)
self["gridcolor"] = gridcolor if gridcolor is not None else _v
_v = arg.pop("gridwidth", None)
self["gridwidth"] = gridwidth if gridwidth is not None else _v
_v = arg.pop("hoverformat", None)
self["hoverformat"] = hoverformat if hoverformat is not None else _v
_v = arg.pop("linecolor", None)
self["linecolor"] = linecolor if linecolor is not None else _v
_v = arg.pop("linewidth", None)
self["linewidth"] = linewidth if linewidth is not None else _v
_v = arg.pop("mirror", None)
self["mirror"] = mirror if mirror is not None else _v
_v = arg.pop("nticks", None)
self["nticks"] = nticks if nticks is not None else _v
_v = arg.pop("range", None)
self["range"] = range if range is not None else _v
_v = arg.pop("rangemode", None)
self["rangemode"] = rangemode if rangemode is not None else _v
_v = arg.pop("separatethousands", None)
self["separatethousands"] = (
separatethousands if separatethousands is not None else _v
)
_v = arg.pop("showaxeslabels", None)
self["showaxeslabels"] = showaxeslabels if showaxeslabels is not None else _v
_v = arg.pop("showbackground", None)
self["showbackground"] = showbackground if showbackground is not None else _v
_v = arg.pop("showexponent", None)
self["showexponent"] = showexponent if showexponent is not None else _v
_v = arg.pop("showgrid", None)
self["showgrid"] = showgrid if showgrid is not None else _v
_v = arg.pop("showline", None)
self["showline"] = showline if showline is not None else _v
_v = arg.pop("showspikes", None)
self["showspikes"] = showspikes if showspikes is not None else _v
_v = arg.pop("showticklabels", None)
self["showticklabels"] = showticklabels if showticklabels is not None else _v
_v = arg.pop("showtickprefix", None)
self["showtickprefix"] = showtickprefix if showtickprefix is not None else _v
_v = arg.pop("showticksuffix", None)
self["showticksuffix"] = showticksuffix if showticksuffix is not None else _v
_v = arg.pop("spikecolor", None)
self["spikecolor"] = spikecolor if spikecolor is not None else _v
_v = arg.pop("spikesides", None)
self["spikesides"] = spikesides if spikesides is not None else _v
_v = arg.pop("spikethickness", None)
self["spikethickness"] = spikethickness if spikethickness is not None else _v
_v = arg.pop("tick0", None)
self["tick0"] = tick0 if tick0 is not None else _v
_v = arg.pop("tickangle", None)
self["tickangle"] = tickangle if tickangle is not None else _v
_v = arg.pop("tickcolor", None)
self["tickcolor"] = tickcolor if tickcolor is not None else _v
_v = arg.pop("tickfont", None)
self["tickfont"] = tickfont if tickfont is not None else _v
_v = arg.pop("tickformat", None)
self["tickformat"] = tickformat if tickformat is not None else _v
_v = arg.pop("tickformatstops", None)
self["tickformatstops"] = tickformatstops if tickformatstops is not None else _v
_v = arg.pop("tickformatstopdefaults", None)
self["tickformatstopdefaults"] = (
tickformatstopdefaults if tickformatstopdefaults is not None else _v
)
_v = arg.pop("ticklen", None)
self["ticklen"] = ticklen if ticklen is not None else _v
_v = arg.pop("tickmode", None)
self["tickmode"] = tickmode if tickmode is not None else _v
_v = arg.pop("tickprefix", None)
self["tickprefix"] = tickprefix if tickprefix is not None else _v
_v = arg.pop("ticks", None)
self["ticks"] = ticks if ticks is not None else _v
_v = arg.pop("ticksuffix", None)
self["ticksuffix"] = ticksuffix if ticksuffix is not None else _v
_v = arg.pop("ticktext", None)
self["ticktext"] = ticktext if ticktext is not None else _v
_v = arg.pop("ticktextsrc", None)
self["ticktextsrc"] = ticktextsrc if ticktextsrc is not None else _v
_v = arg.pop("tickvals", None)
self["tickvals"] = tickvals if tickvals is not None else _v
_v = arg.pop("tickvalssrc", None)
self["tickvalssrc"] = tickvalssrc if tickvalssrc is not None else _v
_v = arg.pop("tickwidth", None)
self["tickwidth"] = tickwidth if tickwidth is not None else _v
_v = arg.pop("title", None)
self["title"] = title if title is not None else _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("type", None)
self["type"] = type if type is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("zeroline", None)
self["zeroline"] = zeroline if zeroline is not None else _v
_v = arg.pop("zerolinecolor", None)
self["zerolinecolor"] = zerolinecolor if zerolinecolor is not None else _v
_v = arg.pop("zerolinewidth", None)
self["zerolinewidth"] = zerolinewidth if zerolinewidth is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
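# Example usage (a hedged sketch, not part of the generated module; assumes
# plotly.graph_objects imports as `go`): build a scene XAxis directly or
# from a dict of compatible properties, then attach it to a figure's 3D
# scene:
#
#     import plotly.graph_objects as go
#     xaxis = go.layout.scene.XAxis(
#         title=dict(text="X"),
#         type="linear",
#         zeroline=True,
#         zerolinecolor="firebrick",
#         zerolinewidth=2,
#     )
#     fig = go.Figure(go.Scatter3d(x=[1, 2], y=[3, 4], z=[5, 6]))
#     fig.update_layout(scene=dict(xaxis=xaxis))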
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Domain(_BaseLayoutHierarchyType):
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this scene subplot.
The 'column' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this scene subplot.
The 'row' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this scene subplot (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this scene subplot (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.scene"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this scene subplot.
row
If there is a layout grid, use the domain for this row
in the grid for this scene subplot.
x
Sets the horizontal domain of this scene subplot (in
plot fraction).
y
Sets the vertical domain of this scene subplot (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this scene subplot.
row
If there is a layout grid, use the domain for this row
in the grid for this scene subplot.
x
Sets the horizontal domain of this scene subplot (in
plot fraction).
y
Sets the vertical domain of this scene subplot (in plot
fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.Domain`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.scene import domain as v_domain
# Initialize validators
# ---------------------
self._validators["column"] = v_domain.ColumnValidator()
self._validators["row"] = v_domain.RowValidator()
self._validators["x"] = v_domain.XValidator()
self._validators["y"] = v_domain.YValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
self["column"] = column if column is not None else _v
_v = arg.pop("row", None)
self["row"] = row if row is not None else _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
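# Example usage (a hedged sketch): a scene subplot can be placed either by
# grid cell (`row`/`column`, which requires a `layout.grid`) or by explicit
# plot-fraction extents (`x`/`y`); here the scene occupies the left half of
# the figure:
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Scatter3d(x=[1], y=[2], z=[3]))
#     fig.update_layout(scene=dict(domain=dict(x=[0.0, 0.5], y=[0.0, 1.0])))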
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Camera(_BaseLayoutHierarchyType):
# center
# ------
@property
def center(self):
"""
Sets the (x,y,z) components of the 'center' camera vector. This
vector determines the translation (x,y,z) space about the
center of this scene. By default, there is no such translation.
The 'center' property is an instance of Center
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.camera.Center`
- A dict of string/value properties that will be passed
to the Center constructor
Supported dict properties:
x
y
z
Returns
-------
plotly.graph_objs.layout.scene.camera.Center
"""
return self["center"]
@center.setter
def center(self, val):
self["center"] = val
# eye
# ---
@property
def eye(self):
"""
Sets the (x,y,z) components of the 'eye' camera vector. This
vector determines the view point about the origin of this
scene.
The 'eye' property is an instance of Eye
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.camera.Eye`
- A dict of string/value properties that will be passed
to the Eye constructor
Supported dict properties:
x
y
z
Returns
-------
plotly.graph_objs.layout.scene.camera.Eye
"""
return self["eye"]
@eye.setter
def eye(self, val):
self["eye"] = val
# projection
# ----------
@property
def projection(self):
"""
The 'projection' property is an instance of Projection
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.camera.Projection`
- A dict of string/value properties that will be passed
to the Projection constructor
Supported dict properties:
type
Sets the projection type. The projection type
could be either "perspective" or
"orthographic". The default is "perspective".
Returns
-------
plotly.graph_objs.layout.scene.camera.Projection
"""
return self["projection"]
@projection.setter
def projection(self, val):
self["projection"] = val
# up
# --
@property
def up(self):
"""
Sets the (x,y,z) components of the 'up' camera vector. This
vector determines the up direction of this scene with respect
to the page. The default is *{x: 0, y: 0, z: 1}* which means
that the z axis points up.
The 'up' property is an instance of Up
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.camera.Up`
- A dict of string/value properties that will be passed
to the Up constructor
Supported dict properties:
x
y
z
Returns
-------
plotly.graph_objs.layout.scene.camera.Up
"""
return self["up"]
@up.setter
def up(self, val):
self["up"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.scene"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
center
Sets the (x,y,z) components of the 'center' camera
vector. This vector determines the translation (x,y,z)
space about the center of this scene. By default, there
is no such translation.
eye
Sets the (x,y,z) components of the 'eye' camera vector.
This vector determines the view point about the origin
of this scene.
projection
:class:`plotly.graph_objects.layout.scene.camera.Projection`
instance or dict with compatible properties
up
Sets the (x,y,z) components of the 'up' camera vector.
This vector determines the up direction of this scene
with respect to the page. The default is *{x: 0, y: 0,
z: 1}* which means that the z axis points up.
"""
def __init__(
self, arg=None, center=None, eye=None, projection=None, up=None, **kwargs
):
"""
Construct a new Camera object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.Camera`
center
Sets the (x,y,z) components of the 'center' camera
vector. This vector determines the translation (x,y,z)
space about the center of this scene. By default, there
is no such translation.
eye
Sets the (x,y,z) components of the 'eye' camera vector.
This vector determines the view point about the origin
of this scene.
projection
:class:`plotly.graph_objects.layout.scene.camera.Projection`
instance or dict with compatible properties
up
Sets the (x,y,z) components of the 'up' camera vector.
This vector determines the up direction of this scene
with respect to the page. The default is *{x: 0, y: 0,
z: 1}* which means that the z axis points up.
Returns
-------
Camera
"""
super(Camera, self).__init__("camera")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.Camera
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.Camera`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.scene import camera as v_camera
# Initialize validators
# ---------------------
self._validators["center"] = v_camera.CenterValidator()
self._validators["eye"] = v_camera.EyeValidator()
self._validators["projection"] = v_camera.ProjectionValidator()
self._validators["up"] = v_camera.UpValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("center", None)
self["center"] = center if center is not None else _v
_v = arg.pop("eye", None)
self["eye"] = eye if eye is not None else _v
_v = arg.pop("projection", None)
self["projection"] = projection if projection is not None else _v
_v = arg.pop("up", None)
self["up"] = up if up is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
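# Example usage (a hedged sketch): the camera is described by three (x, y, z)
# vectors, `up` (page-up direction), `center` (translation of the view
# target) and `eye` (viewpoint), plus a projection type:
#
#     import plotly.graph_objects as go
#     camera = go.layout.scene.Camera(
#         up=dict(x=0, y=0, z=1),
#         center=dict(x=0, y=0, z=0),
#         eye=dict(x=1.25, y=1.25, z=1.25),
#         projection=dict(type="orthographic"),
#     )
#     fig = go.Figure(go.Surface(z=[[1, 2], [3, 4]]))
#     fig.update_layout(scene_camera=camera)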
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Aspectratio(_BaseLayoutHierarchyType):
# x
# -
@property
def x(self):
"""
The 'x' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
The 'y' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# z
# -
@property
def z(self):
"""
The 'z' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.scene"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
x
y
z
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Aspectratio object
Sets this scene's axis aspectratio.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.Aspectratio`
x
y
z
Returns
-------
Aspectratio
"""
super(Aspectratio, self).__init__("aspectratio")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.Aspectratio
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.Aspectratio`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.scene import aspectratio as v_aspectratio
# Initialize validators
# ---------------------
self._validators["x"] = v_aspectratio.XValidator()
self._validators["y"] = v_aspectratio.YValidator()
self._validators["z"] = v_aspectratio.ZValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
_v = arg.pop("z", None)
self["z"] = z if z is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
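# Example usage (a hedged sketch): a manual aspect ratio only takes effect
# when the scene's `aspectmode` is "manual" (`aspectmode` lives on
# layout.scene itself, not on this class):
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Surface(z=[[1, 2], [3, 4]]))
#     fig.update_layout(scene=dict(
#         aspectmode="manual",
#         aspectratio=dict(x=2, y=1, z=0.5),
#     ))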
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Annotation(_BaseLayoutHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the `text` within the box. Has
an effect only if `text` spans two or more lines (i.e. `text`
contains one or more <br> HTML tags) or if an explicit width is
set to override the text width.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# arrowcolor
# ----------
@property
def arrowcolor(self):
"""
Sets the color of the annotation arrow.
The 'arrowcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["arrowcolor"]
@arrowcolor.setter
def arrowcolor(self, val):
self["arrowcolor"] = val
# arrowhead
# ---------
@property
def arrowhead(self):
"""
Sets the end annotation arrow head style.
The 'arrowhead' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 8]
Returns
-------
int
"""
return self["arrowhead"]
@arrowhead.setter
def arrowhead(self, val):
self["arrowhead"] = val
# arrowside
# ---------
@property
def arrowside(self):
"""
Sets the annotation arrow head position.
The 'arrowside' property is a flaglist and may be specified
as a string containing:
- Any combination of ['end', 'start'] joined with '+' characters
(e.g. 'end+start')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["arrowside"]
@arrowside.setter
def arrowside(self, val):
self["arrowside"] = val
# arrowsize
# ---------
@property
def arrowsize(self):
"""
Sets the size of the end annotation arrow head, relative to
`arrowwidth`. A value of 1 (default) gives a head about 3x as
wide as the line.
The 'arrowsize' property is a number and may be specified as:
- An int or float in the interval [0.3, inf]
Returns
-------
int|float
"""
return self["arrowsize"]
@arrowsize.setter
def arrowsize(self, val):
self["arrowsize"] = val
# arrowwidth
# ----------
@property
def arrowwidth(self):
"""
Sets the width (in px) of annotation arrow line.
The 'arrowwidth' property is a number and may be specified as:
- An int or float in the interval [0.1, inf]
Returns
-------
int|float
"""
return self["arrowwidth"]
@arrowwidth.setter
def arrowwidth(self, val):
self["arrowwidth"] = val
# ax
# --
@property
def ax(self):
"""
Sets the x component of the arrow tail about the arrow head (in
pixels).
The 'ax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["ax"]
@ax.setter
def ax(self, val):
self["ax"] = val
# ay
# --
@property
def ay(self):
"""
Sets the y component of the arrow tail about the arrow head (in
pixels).
The 'ay' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["ay"]
@ay.setter
def ay(self, val):
self["ay"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the annotation.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the color of the border enclosing the annotation `text`.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderpad
# ---------
@property
def borderpad(self):
"""
Sets the padding (in px) between the `text` and the enclosing
border.
The 'borderpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderpad"]
@borderpad.setter
def borderpad(self, val):
self["borderpad"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) of the border enclosing the annotation
`text`.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# captureevents
# -------------
@property
def captureevents(self):
"""
Determines whether the annotation text box captures mouse move
and click events, or allows those events to pass through to
data points in the plot that may be behind the annotation. By
default `captureevents` is False unless `hovertext` is
provided. If you use the event `plotly_clickannotation` without
`hovertext` you must explicitly enable `captureevents`.
The 'captureevents' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["captureevents"]
@captureevents.setter
def captureevents(self, val):
self["captureevents"] = val
# font
# ----
@property
def font(self):
"""
Sets the annotation text font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.annotation.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.annotation.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# height
# ------
@property
def height(self):
"""
Sets an explicit height for the text box. null (default) lets
the text set the box height. Taller text will be clipped.
The 'height' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["height"]
@height.setter
def height(self, val):
self["height"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.annotation.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover label.
By default uses the annotation's `bgcolor` made
opaque, or white if it was transparent.
bordercolor
Sets the border color of the hover label. By
default uses either dark grey or white, for
maximum contrast with `hoverlabel.bgcolor`.
font
Sets the hover label text font. By default uses
the global hover font and size, with color from
`hoverlabel.bordercolor`.
Returns
-------
plotly.graph_objs.layout.scene.annotation.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets text to appear when hovering over this annotation. If
omitted or blank, no hover label will appear.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the annotation (text + arrow).
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# showarrow
# ---------
@property
def showarrow(self):
"""
Determines whether or not the annotation is drawn with an
arrow. If True, `text` is placed near the arrow's tail. If
False, `text` lines up with the `x` and `y` provided.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
# standoff
# --------
@property
def standoff(self):
"""
Sets a distance, in pixels, to move the end arrowhead away from
the position it is pointing at, for example to point at the
edge of a marker independent of zoom. Note that this shortens
the arrow from the `ax` / `ay` vector, in contrast to `xshift`
/ `yshift` which moves everything by this amount.
The 'standoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["standoff"]
@standoff.setter
def standoff(self, val):
self["standoff"] = val
# startarrowhead
# --------------
@property
def startarrowhead(self):
"""
Sets the start annotation arrow head style.
The 'startarrowhead' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 8]
Returns
-------
int
"""
return self["startarrowhead"]
@startarrowhead.setter
def startarrowhead(self, val):
self["startarrowhead"] = val
# startarrowsize
# --------------
@property
def startarrowsize(self):
"""
Sets the size of the start annotation arrow head, relative to
`arrowwidth`. A value of 1 (default) gives a head about 3x as
wide as the line.
The 'startarrowsize' property is a number and may be specified as:
- An int or float in the interval [0.3, inf]
Returns
-------
int|float
"""
return self["startarrowsize"]
@startarrowsize.setter
def startarrowsize(self, val):
self["startarrowsize"] = val
# startstandoff
# -------------
@property
def startstandoff(self):
"""
Sets a distance, in pixels, to move the start arrowhead away
from the position it is pointing at, for example to point at
the edge of a marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector, in contrast to
`xshift` / `yshift` which moves everything by this amount.
The 'startstandoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["startstandoff"]
@startstandoff.setter
def startstandoff(self, val):
self["startstandoff"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# text
# ----
@property
def text(self):
"""
Sets the text associated with this annotation. Plotly uses a
subset of HTML tags to do things like newline (<br>), bold
(<b></b>), italics (<i></i>), hyperlinks (<a href='...'></a>).
Tags <em>, <sup>, <sub>, <span> are also supported.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textangle
# ---------
@property
def textangle(self):
"""
Sets the angle at which the `text` is drawn with respect to the
horizontal.
The 'textangle' property is an angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["textangle"]
@textangle.setter
def textangle(self, val):
self["textangle"] = val
# valign
# ------
@property
def valign(self):
"""
Sets the vertical alignment of the `text` within the box. Has
an effect only if an explicit height is set to override the
text height.
The 'valign' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["valign"]
@valign.setter
def valign(self, val):
self["valign"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this annotation is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets an explicit width for the text box. null (default) lets
the text set the box width. Wider text will be clipped. There
is no automatic wrapping; use <br> to start a new line.
The 'width' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# x
# -
@property
def x(self):
"""
Sets the annotation's x position.
The 'x' property accepts values of any type
Returns
-------
Any
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets the text box's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the annotation. For example, if `x` is set to 1, `xref` to
"paper" and `xanchor` to "right" then the right-most portion of
the annotation lines up with the right-most edge of the
plotting area. If "auto", the anchor is equivalent to "center"
for data-referenced annotations or if there is an arrow,
whereas for paper-referenced with no arrow, the anchor picked
corresponds to the closest side.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xshift
# ------
@property
def xshift(self):
"""
Shifts the position of the whole annotation and arrow to the
right (positive) or left (negative) by this many pixels.
The 'xshift' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["xshift"]
@xshift.setter
def xshift(self, val):
self["xshift"] = val
# y
# -
@property
def y(self):
"""
Sets the annotation's y position.
The 'y' property accepts values of any type
Returns
-------
Any
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets the text box's vertical position anchor. This anchor binds
the `y` position to the "top", "middle" or "bottom" of the
annotation. For example, if `y` is set to 1, `yref` to "paper"
and `yanchor` to "top" then the top-most portion of the
annotation lines up with the top-most edge of the plotting
area. If "auto", the anchor is equivalent to "middle" for data-
referenced annotations or if there is an arrow, whereas for
paper-referenced with no arrow, the anchor picked corresponds
to the closest side.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# yshift
# ------
@property
def yshift(self):
"""
Shifts the position of the whole annotation and arrow up
(positive) or down (negative) by this many pixels.
The 'yshift' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["yshift"]
@yshift.setter
def yshift(self, val):
self["yshift"] = val
# z
# -
@property
def z(self):
"""
Sets the annotation's z position.
The 'z' property accepts values of any type
Returns
-------
Any
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.scene"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the `text` within the
box. Has an effect only if `text` spans two or more
lines (i.e. `text` contains one or more <br> HTML tags)
or if an explicit width is set to override the text
width.
arrowcolor
Sets the color of the annotation arrow.
arrowhead
Sets the end annotation arrow head style.
arrowside
Sets the annotation arrow head position.
arrowsize
Sets the size of the end annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
arrowwidth
Sets the width (in px) of the annotation arrow line.
ax
Sets the x component of the arrow tail relative to the
arrow head (in pixels).
ay
Sets the y component of the arrow tail relative to the
arrow head (in pixels).
bgcolor
Sets the background color of the annotation.
bordercolor
Sets the color of the border enclosing the annotation
`text`.
borderpad
Sets the padding (in px) between the `text` and the
enclosing border.
borderwidth
Sets the width (in px) of the border enclosing the
annotation `text`.
captureevents
Determines whether the annotation text box captures
mouse move and click events, or allows those events to
pass through to data points in the plot that may be
behind the annotation. By default `captureevents` is
False unless `hovertext` is provided. If you use the
event `plotly_clickannotation` without `hovertext` you
must explicitly enable `captureevents`.
font
Sets the annotation text font.
height
Sets an explicit height for the text box. null
(default) lets the text set the box height. Taller text
will be clipped.
hoverlabel
:class:`plotly.graph_objects.layout.scene.annotation.Ho
verlabel` instance or dict with compatible properties
hovertext
Sets text to appear when hovering over this annotation.
If omitted or blank, no hover label will appear.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the annotation (text + arrow).
showarrow
Determines whether or not the annotation is drawn with
an arrow. If True, `text` is placed near the arrow's
tail. If False, `text` lines up with the `x` and `y`
provided.
standoff
Sets a distance, in pixels, to move the end arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
startarrowhead
Sets the start annotation arrow head style.
startarrowsize
Sets the size of the start annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
startstandoff
Sets a distance, in pixels, to move the start arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
text
Sets the text associated with this annotation. Plotly
uses a subset of HTML tags to do things like newline
(<br>), bold (<b></b>), italics (<i></i>), hyperlinks
(<a href='...'></a>). Tags <em>, <sup>, <sub>, <span>
are also supported.
textangle
Sets the angle at which the `text` is drawn with
respect to the horizontal.
valign
Sets the vertical alignment of the `text` within the
box. Has an effect only if an explicit height is set to
override the text height.
visible
Determines whether or not this annotation is visible.
width
Sets an explicit width for the text box. null (default)
lets the text set the box width. Wider text will be
clipped. There is no automatic wrapping; use <br> to
start a new line.
x
Sets the annotation's x position.
xanchor
Sets the text box's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the annotation. For example, if `x` is
set to 1, `xref` to "paper" and `xanchor` to "right"
then the right-most portion of the annotation lines up
with the right-most edge of the plotting area. If
"auto", the anchor is equivalent to "center" for data-
referenced annotations or if there is an arrow, whereas
for paper-referenced with no arrow, the anchor picked
corresponds to the closest side.
xshift
Shifts the position of the whole annotation and arrow
to the right (positive) or left (negative) by this many
pixels.
y
Sets the annotation's y position.
yanchor
Sets the text box's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the annotation. For example, if `y` is set
to 1, `yref` to "paper" and `yanchor` to "top" then the
top-most portion of the annotation lines up with the
top-most edge of the plotting area. If "auto", the
anchor is equivalent to "middle" for data-referenced
annotations or if there is an arrow, whereas for paper-
referenced with no arrow, the anchor picked corresponds
to the closest side.
yshift
Shifts the position of the whole annotation and arrow
up (positive) or down (negative) by this many pixels.
z
Sets the annotation's z position.
"""
def __init__(
self,
arg=None,
align=None,
arrowcolor=None,
arrowhead=None,
arrowside=None,
arrowsize=None,
arrowwidth=None,
ax=None,
ay=None,
bgcolor=None,
bordercolor=None,
borderpad=None,
borderwidth=None,
captureevents=None,
font=None,
height=None,
hoverlabel=None,
hovertext=None,
name=None,
opacity=None,
showarrow=None,
standoff=None,
startarrowhead=None,
startarrowsize=None,
startstandoff=None,
templateitemname=None,
text=None,
textangle=None,
valign=None,
visible=None,
width=None,
x=None,
xanchor=None,
xshift=None,
y=None,
yanchor=None,
yshift=None,
z=None,
**kwargs
):
"""
Construct a new Annotation object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.Annotation`
align
Sets the horizontal alignment of the `text` within the
box. Has an effect only if `text` spans two or more
lines (i.e. `text` contains one or more <br> HTML tags)
or if an explicit width is set to override the text
width.
arrowcolor
Sets the color of the annotation arrow.
arrowhead
Sets the end annotation arrow head style.
arrowside
Sets the annotation arrow head position.
arrowsize
Sets the size of the end annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
arrowwidth
Sets the width (in px) of the annotation arrow line.
ax
Sets the x component of the arrow tail relative to the
arrow head (in pixels).
ay
Sets the y component of the arrow tail relative to the
arrow head (in pixels).
bgcolor
Sets the background color of the annotation.
bordercolor
Sets the color of the border enclosing the annotation
`text`.
borderpad
Sets the padding (in px) between the `text` and the
enclosing border.
borderwidth
Sets the width (in px) of the border enclosing the
annotation `text`.
captureevents
Determines whether the annotation text box captures
mouse move and click events, or allows those events to
pass through to data points in the plot that may be
behind the annotation. By default `captureevents` is
False unless `hovertext` is provided. If you use the
event `plotly_clickannotation` without `hovertext` you
must explicitly enable `captureevents`.
font
Sets the annotation text font.
height
Sets an explicit height for the text box. null
(default) lets the text set the box height. Taller text
will be clipped.
hoverlabel
:class:`plotly.graph_objects.layout.scene.annotation.Ho
verlabel` instance or dict with compatible properties
hovertext
Sets text to appear when hovering over this annotation.
If omitted or blank, no hover label will appear.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the annotation (text + arrow).
showarrow
Determines whether or not the annotation is drawn with
an arrow. If True, `text` is placed near the arrow's
tail. If False, `text` lines up with the `x` and `y`
provided.
standoff
Sets a distance, in pixels, to move the end arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
startarrowhead
Sets the start annotation arrow head style.
startarrowsize
Sets the size of the start annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
startstandoff
Sets a distance, in pixels, to move the start arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
text
Sets the text associated with this annotation. Plotly
uses a subset of HTML tags to do things like newline
(<br>), bold (<b></b>), italics (<i></i>), hyperlinks
(<a href='...'></a>). Tags <em>, <sup>, <sub>, <span>
are also supported.
textangle
Sets the angle at which the `text` is drawn with
respect to the horizontal.
valign
Sets the vertical alignment of the `text` within the
box. Has an effect only if an explicit height is set to
override the text height.
visible
Determines whether or not this annotation is visible.
width
Sets an explicit width for the text box. null (default)
lets the text set the box width. Wider text will be
clipped. There is no automatic wrapping; use <br> to
start a new line.
x
Sets the annotation's x position.
xanchor
Sets the text box's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the annotation. For example, if `x` is
set to 1, `xref` to "paper" and `xanchor` to "right"
then the right-most portion of the annotation lines up
with the right-most edge of the plotting area. If
"auto", the anchor is equivalent to "center" for data-
referenced annotations or if there is an arrow, whereas
for paper-referenced with no arrow, the anchor picked
corresponds to the closest side.
xshift
Shifts the position of the whole annotation and arrow
to the right (positive) or left (negative) by this many
pixels.
y
Sets the annotation's y position.
yanchor
Sets the text box's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the annotation. For example, if `y` is set
to 1, `yref` to "paper" and `yanchor` to "top" then the
top-most portion of the annotation lines up with the
top-most edge of the plotting area. If "auto", the
anchor is equivalent to "middle" for data-referenced
annotations or if there is an arrow, whereas for paper-
referenced with no arrow, the anchor picked corresponds
to the closest side.
yshift
Shifts the position of the whole annotation and arrow
up (positive) or down (negative) by this many pixels.
z
Sets the annotation's z position.
Returns
-------
Annotation
"""
super(Annotation, self).__init__("annotations")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.Annotation
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.Annotation`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.scene import annotation as v_annotation
# Initialize validators
# ---------------------
self._validators["align"] = v_annotation.AlignValidator()
self._validators["arrowcolor"] = v_annotation.ArrowcolorValidator()
self._validators["arrowhead"] = v_annotation.ArrowheadValidator()
self._validators["arrowside"] = v_annotation.ArrowsideValidator()
self._validators["arrowsize"] = v_annotation.ArrowsizeValidator()
self._validators["arrowwidth"] = v_annotation.ArrowwidthValidator()
self._validators["ax"] = v_annotation.AxValidator()
self._validators["ay"] = v_annotation.AyValidator()
self._validators["bgcolor"] = v_annotation.BgcolorValidator()
self._validators["bordercolor"] = v_annotation.BordercolorValidator()
self._validators["borderpad"] = v_annotation.BorderpadValidator()
self._validators["borderwidth"] = v_annotation.BorderwidthValidator()
self._validators["captureevents"] = v_annotation.CaptureeventsValidator()
self._validators["font"] = v_annotation.FontValidator()
self._validators["height"] = v_annotation.HeightValidator()
self._validators["hoverlabel"] = v_annotation.HoverlabelValidator()
self._validators["hovertext"] = v_annotation.HovertextValidator()
self._validators["name"] = v_annotation.NameValidator()
self._validators["opacity"] = v_annotation.OpacityValidator()
self._validators["showarrow"] = v_annotation.ShowarrowValidator()
self._validators["standoff"] = v_annotation.StandoffValidator()
self._validators["startarrowhead"] = v_annotation.StartarrowheadValidator()
self._validators["startarrowsize"] = v_annotation.StartarrowsizeValidator()
self._validators["startstandoff"] = v_annotation.StartstandoffValidator()
self._validators["templateitemname"] = v_annotation.TemplateitemnameValidator()
self._validators["text"] = v_annotation.TextValidator()
self._validators["textangle"] = v_annotation.TextangleValidator()
self._validators["valign"] = v_annotation.ValignValidator()
self._validators["visible"] = v_annotation.VisibleValidator()
self._validators["width"] = v_annotation.WidthValidator()
self._validators["x"] = v_annotation.XValidator()
self._validators["xanchor"] = v_annotation.XanchorValidator()
self._validators["xshift"] = v_annotation.XshiftValidator()
self._validators["y"] = v_annotation.YValidator()
self._validators["yanchor"] = v_annotation.YanchorValidator()
self._validators["yshift"] = v_annotation.YshiftValidator()
self._validators["z"] = v_annotation.ZValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
self["align"] = align if align is not None else _v
_v = arg.pop("arrowcolor", None)
self["arrowcolor"] = arrowcolor if arrowcolor is not None else _v
_v = arg.pop("arrowhead", None)
self["arrowhead"] = arrowhead if arrowhead is not None else _v
_v = arg.pop("arrowside", None)
self["arrowside"] = arrowside if arrowside is not None else _v
_v = arg.pop("arrowsize", None)
self["arrowsize"] = arrowsize if arrowsize is not None else _v
_v = arg.pop("arrowwidth", None)
self["arrowwidth"] = arrowwidth if arrowwidth is not None else _v
_v = arg.pop("ax", None)
self["ax"] = ax if ax is not None else _v
_v = arg.pop("ay", None)
self["ay"] = ay if ay is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("borderpad", None)
self["borderpad"] = borderpad if borderpad is not None else _v
_v = arg.pop("borderwidth", None)
self["borderwidth"] = borderwidth if borderwidth is not None else _v
_v = arg.pop("captureevents", None)
self["captureevents"] = captureevents if captureevents is not None else _v
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("height", None)
self["height"] = height if height is not None else _v
_v = arg.pop("hoverlabel", None)
self["hoverlabel"] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop("hovertext", None)
self["hovertext"] = hovertext if hovertext is not None else _v
_v = arg.pop("name", None)
self["name"] = name if name is not None else _v
_v = arg.pop("opacity", None)
self["opacity"] = opacity if opacity is not None else _v
_v = arg.pop("showarrow", None)
self["showarrow"] = showarrow if showarrow is not None else _v
_v = arg.pop("standoff", None)
self["standoff"] = standoff if standoff is not None else _v
_v = arg.pop("startarrowhead", None)
self["startarrowhead"] = startarrowhead if startarrowhead is not None else _v
_v = arg.pop("startarrowsize", None)
self["startarrowsize"] = startarrowsize if startarrowsize is not None else _v
_v = arg.pop("startstandoff", None)
self["startstandoff"] = startstandoff if startstandoff is not None else _v
_v = arg.pop("templateitemname", None)
self["templateitemname"] = (
templateitemname if templateitemname is not None else _v
)
_v = arg.pop("text", None)
self["text"] = text if text is not None else _v
_v = arg.pop("textangle", None)
self["textangle"] = textangle if textangle is not None else _v
_v = arg.pop("valign", None)
self["valign"] = valign if valign is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("xanchor", None)
self["xanchor"] = xanchor if xanchor is not None else _v
_v = arg.pop("xshift", None)
self["xshift"] = xshift if xshift is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
_v = arg.pop("yanchor", None)
self["yanchor"] = yanchor if yanchor is not None else _v
_v = arg.pop("yshift", None)
self["yshift"] = yshift if yshift is not None else _v
_v = arg.pop("z", None)
self["z"] = z if z is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = [
"Annotation",
"Annotation",
"Aspectratio",
"Camera",
"Domain",
"XAxis",
"YAxis",
"ZAxis",
"annotation",
"camera",
"xaxis",
"yaxis",
"zaxis",
]
from plotly.graph_objs.layout.scene import zaxis
from plotly.graph_objs.layout.scene import yaxis
from plotly.graph_objs.layout.scene import xaxis
from plotly.graph_objs.layout.scene import camera
from plotly.graph_objs.layout.scene import annotation
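# A minimal usage sketch for the Annotation class above (assumes plotly is
# installed; the trace and coordinate values are illustrative only):
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(go.Scatter3d(x=[1], y=[2], z=[3], mode="markers"))
    fig.update_layout(
        scene=dict(
            annotations=[
                dict(
                    text="peak", x=1, y=2, z=3,
                    showarrow=True, arrowhead=2, standoff=4,
                    bgcolor="rgba(255,255,255,0.7)",
                    bordercolor="black", borderwidth=1,
                )
            ]
        )
    )
    fig.show()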
# ==== tests/test_hand.py | alisol911/hot-hands @ 648f32b3b11b768dbced407fabc963694a97dbfe | MIT | 3,935 bytes | Python ====
import unittest
from server.models import (Hand, HandType, WinnerType, MinHand, MaxHand)
class HandTests(unittest.TestCase):
def test_hand(self):
h = Hand()
for _ in range(7):
    t = h.Throw()
    self.assertTrue(MinHand <= t <= MaxHand)
def test_judge(self):
h = Hand()
self.assertTrue(h.Judge(HandType.Nothing, HandType.Nothing) == WinnerType.Draw)
self.assertTrue(h.Judge(HandType.Nothing, HandType.Rock) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Nothing, HandType.Paper) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Nothing, HandType.Scissors) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Nothing, HandType.Spock) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Nothing, HandType.Lizard) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Rock, HandType.Nothing) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Rock, HandType.Rock) == WinnerType.Draw)
self.assertTrue(h.Judge(HandType.Rock, HandType.Paper) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Rock, HandType.Scissors) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Rock, HandType.Spock) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Rock, HandType.Lizard) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Paper, HandType.Nothing) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Paper, HandType.Rock) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Paper, HandType.Paper) == WinnerType.Draw)
self.assertTrue(h.Judge(HandType.Paper, HandType.Scissors) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Paper, HandType.Spock) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Paper, HandType.Lizard) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Scissors, HandType.Nothing) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Scissors, HandType.Rock) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Scissors, HandType.Paper) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Scissors, HandType.Scissors) == WinnerType.Draw)
self.assertTrue(h.Judge(HandType.Scissors, HandType.Spock) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Scissors, HandType.Lizard) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Spock, HandType.Nothing) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Spock, HandType.Rock) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Spock, HandType.Paper) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Spock, HandType.Scissors) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Spock, HandType.Spock) == WinnerType.Draw)
self.assertTrue(h.Judge(HandType.Spock, HandType.Lizard) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Lizard, HandType.Nothing) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Lizard, HandType.Rock) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Lizard, HandType.Paper) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Lizard, HandType.Scissors) == WinnerType.Player2)
self.assertTrue(h.Judge(HandType.Lizard, HandType.Spock) == WinnerType.Player1)
self.assertTrue(h.Judge(HandType.Lizard, HandType.Lizard) == WinnerType.Draw)
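# A hedged sketch of the contract these tests assume from server.models.
# Names are prefixed with Ref* to avoid shadowing the real imports above;
# the actual implementation lives in the repo's server/models.py.
import enum
import random


class RefHandType(enum.IntEnum):
    Nothing = 0
    Rock = 1
    Paper = 2
    Scissors = 3
    Spock = 4
    Lizard = 5


class RefWinnerType(enum.IntEnum):
    Draw = 0
    Player1 = 1
    Player2 = 2


RefMinHand, RefMaxHand = RefHandType.Rock, RefHandType.Lizard

# Each key beats every member of its value set (standard
# rock-paper-scissors-lizard-Spock rules, matching the table of
# assertions in test_judge above).
_BEATS = {
    RefHandType.Rock: {RefHandType.Scissors, RefHandType.Lizard},
    RefHandType.Paper: {RefHandType.Rock, RefHandType.Spock},
    RefHandType.Scissors: {RefHandType.Paper, RefHandType.Lizard},
    RefHandType.Spock: {RefHandType.Rock, RefHandType.Scissors},
    RefHandType.Lizard: {RefHandType.Paper, RefHandType.Spock},
}


class RefHand:
    def Throw(self):
        # Uniformly pick a real hand; Nothing is never thrown.
        return RefHandType(random.randint(RefMinHand, RefMaxHand))

    def Judge(self, p1, p2):
        if p1 == p2:
            return RefWinnerType.Draw
        # Nothing loses to any real hand; otherwise consult the beats table.
        if p2 == RefHandType.Nothing or p2 in _BEATS.get(p1, set()):
            return RefWinnerType.Player1
        return RefWinnerType.Player2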
# ==== grayscale/math/min.py | KennethanCeyer/grayscale @ 646a11ea47f2120f317e554c736d8054aa55c4c4 | MIT | 132 bytes | Python ====
from builtins import min as builtin_min
from typing import List
def min(nums: List[float]) -> float:
return builtin_min(nums)
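# Usage sketch (the module keeps the shadowed builtin via `builtin_min`):
#   >>> from grayscale.math.min import min
#   >>> min([3.2, 1.5, 2.8])
#   1.5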
# ==== sap2012/SAP_tables/__init__.py | building-energy/sap2012 @ 4cb3a362be4662b0e96c56a3765771f0cba91422 | MIT | 420 bytes | Python ====
# -*- coding: utf-8 -*-
from .temperature_reduction_when_heating_is_off_table_9b import temperature_reduction_when_heating_is_off_table_9b
from .utilisation_factor_for_heating_table_9a import utilisation_factor_for_heating_table_9a
from .heating_requirement_table_9c import heating_requirement_table_9c
from .utilisation_factor_for_heating_whole_house_table_9a import utilisation_factor_for_heating_whole_house_table_9a
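# These table functions are re-exported at package level, so callers can
# import them directly, e.g. (a sketch; signatures live in the individual
# table modules):
#
#     from sap2012.SAP_tables import heating_requirement_table_9c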
# ==== test/programytest/config/brain/test_security.py | whackur/chatbot @ bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7 | MIT | 6,665 bytes | Python ====
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.brain.security import BrainSecurityConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class BrainSecurityConfigurationTests(unittest.TestCase):
def test_authorisation_with_data_denied_srai(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
security:
authorisation:
classname: programy.security.authorise.passthrough.PassThroughAuthorisationService
denied_srai: AUTHORISATION_FAILED
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
services_config = yaml.get_section("security", brain_config)
self.assertIsNotNone(services_config)
service_config = BrainSecurityConfiguration("authorisation")
service_config.load_config_section(yaml, services_config, ".")
self.assertEqual("programy.security.authorise.passthrough.PassThroughAuthorisationService", service_config.classname)
self.assertEqual("AUTHORISATION_FAILED", service_config.denied_srai)
self.assertEqual(BrainSecurityConfiguration.DEFAULT_ACCESS_DENIED, service_config.denied_text)
def test_authorisation_with_data_denied_text(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
security:
authorisation:
classname: programy.security.authorise.passthrough.PassThroughAuthorisationService
denied_text: Authorisation Failed
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
services_config = yaml.get_section("security", brain_config)
self.assertIsNotNone(services_config)
service_config = BrainSecurityConfiguration("authorisation")
service_config.load_config_section(yaml, services_config, ".")
self.assertEqual("programy.security.authorise.passthrough.PassThroughAuthorisationService", service_config.classname)
self.assertEqual("Authorisation Failed", service_config.denied_text)
self.assertIsNone(service_config.denied_srai)
def test_authorisation_with_data_neither_denied_srai_or_text(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
security:
authorisation:
classname: programy.security.authorise.passthrough.PassThroughAuthorisationService
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
services_config = yaml.get_section("security", brain_config)
self.assertIsNotNone(services_config)
service_config = BrainSecurityConfiguration("authorisation")
service_config.load_config_section(yaml, services_config, ".")
self.assertEqual("programy.security.authorise.passthrough.PassThroughAuthorisationService", service_config.classname)
self.assertEqual(BrainSecurityConfiguration.DEFAULT_ACCESS_DENIED, service_config.denied_text)
self.assertIsNone(service_config.denied_srai)
def test_authentication_with_data_denied_srai(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
security:
authentication:
classname: programy.security.authenticate.passthrough.PassThroughAuthenticationService
denied_srai: AUTHENTICATION_FAILED
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
services_config = yaml.get_section("security", brain_config)
self.assertIsNotNone(services_config)
service_config = BrainSecurityConfiguration("authentication")
service_config.load_config_section(yaml, services_config, ".")
self.assertEqual("programy.security.authenticate.passthrough.PassThroughAuthenticationService", service_config.classname)
self.assertEqual("AUTHENTICATION_FAILED", service_config.denied_srai)
self.assertEqual(BrainSecurityConfiguration.DEFAULT_ACCESS_DENIED, service_config.denied_text)
def test_authentication_with_data_denied_text(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
security:
authentication:
classname: programy.security.authenticate.passthrough.PassThroughAuthenticationService
denied_text: Authentication failed
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
services_config = yaml.get_section("security", brain_config)
self.assertIsNotNone(services_config)
service_config = BrainSecurityConfiguration("authentication")
service_config.load_config_section(yaml, services_config, ".")
self.assertEqual("programy.security.authenticate.passthrough.PassThroughAuthenticationService", service_config.classname)
self.assertEqual("Authentication failed", service_config.denied_text)
self.assertIsNone(service_config.denied_srai)
def test_authentication_with_data_neither_denied_srai_or_text(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
security:
authentication:
classname: programy.security.authenticate.passthrough.PassThroughAuthenticationService
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
services_config = yaml.get_section("security", brain_config)
self.assertIsNotNone(services_config)
service_config = BrainSecurityConfiguration("authentication")
service_config.load_config_section(yaml, services_config, ".")
self.assertEqual("programy.security.authenticate.passthrough.PassThroughAuthenticationService", service_config.classname)
self.assertEqual(BrainSecurityConfiguration.DEFAULT_ACCESS_DENIED, service_config.denied_text)
self.assertIsNone(service_config.denied_srai)
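# The load pattern these tests exercise, condensed (uses only the calls
# shown above; `config_text` stands in for any of the YAML snippets):
#
#     yaml = YamlConfigurationFile()
#     yaml.load_from_text(config_text, ConsoleConfiguration(), ".")
#     security = yaml.get_section("security", yaml.get_section("brain"))
#     service_config = BrainSecurityConfiguration("authorisation")
#     service_config.load_config_section(yaml, security, ".")
#     # denied_text falls back to BrainSecurityConfiguration.DEFAULT_ACCESS_DENIED
#     # when neither denied_text nor denied_srai is configured.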
# ==== transmitter.py | wrycu/srs_recorder @ c2af1b2a28bf56a8574eb6fb3b356990ba8f78dc | MIT | 63,209 bytes | Python ====
import socket
import time
import arrow
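# Opaque captured packets (raw datagram payloads), keyed by capture session
# name; presumably recorded SRS traffic that this script retransmits.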
data = {
'thanksgiving': [
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01N\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01O\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01P\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01Q\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01R\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01S\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01T\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01U\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01V\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'X\x00\x0f\x00\n\x00P\x03q\xbc\xe4f\xe3<<\xb5\x08\x8ak!@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01W\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'h\x00\x1f\x00\n\x00PC{XX\xf8D\xa4\xaf"\x9fe\xff\xcap\xb5\xee\xc2\x03\xd7\xb6\x8d\n\x91\xfc\xbb\xf0\xa4\xdb\xfd@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01X\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x93\x00J\x00\n\x00P\xc3\x08\xc5\x0f\x8e\t\xec\xc2\xebk\xa8\x94M\xa0\x8b\xa6q\xc7O \x80\x14\xfc\xe4[\xb1\x88U6\xeeFw\x8e\xddH\xd1\x8ddf\xb5nw\x9cvK\x82\x91J\xe5\xfe2\x02\x02\xe9\xe9\x82\xe1\x87,\x11\xc2Tf5\xe5TBg\x13s\xc2\x88t\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01Y\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xb3\x00j\x00\n\x00P\xc5\x14\xc3]\xa7u&G7z\n?\xc9\xc5\x97\x0b\x01z\x15\xe0\x81\xb4\x86\xf4'+\xf2\xab6\x18\x9c2\xea\x90\x1e*\xccWR\xb2\xb2D\xac0\x88\xa7\xba\xe1v7\x87O\xa8\x98\xcdd\xa5\xa3C\xd2-f\x19k\xd2\xdec\xd8\x93\xdb y\xc3\xe6\x04\xd9y\x8ak\x9a\r\x88\xcd\xdc\xcd\xe0v:B^?G3\xa7y\xe8\xaf\x81\x9d^h\x98\xf1\x12\xe0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01Z\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xb4\x00k\x00\n\x00P\xca\xaeO\xbd\xdb\'\xab=\xe7X\xe8\xf5ZjIP\x0bP\xe1\xfd(\xdci\xa2\xb5[a\x14\xd2D\xa3\xcd\x8ci\x9aC\x96\x1a\x87\xb2\xc7c\xef\xee\xa98[fE\xaf..\x9aj\x8e\x9c\xc4+\xd5;]\x86w\x10\xe2Uhe\xc4\xb8\xce\xdf\x1ef\xa7\xf7\x05"c\x1a\xa4]\xeb\xb6U\xcb\xe73\xda]m\x9d\x98N\x02X\xa8\xf0\x8809\x02 \xfe\xbb.\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01[\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb0\x00g\x00\n\x00P\xcb\x0c\xd1\x01V\x8c\x13\x06\xa8=\xf2\x9e\xc5\xab0T\xe9\xfd\x15\xa4\xe2\x87u\x94\x18\x1cl\\\xb0\xb8\xe8\x8e\xc6\xa2p\x96\xf4X\xaa@5\xe8\xb3}\xb0\xa3\xb8Sb\x04\x1f\xcf\x8f\xce\xe2\x1b\xf0\x99 g>\x1a#\x82x\xab\xadTz\x84\xc8\xd0\xa6XQV,\xc8\x03\xeb\xd9\xb1\x8d\xec\x83]\xbd\xd6\x84\x0f>\x1f\xdb"\xa6\xdfIM\xbd\xe2b\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\\\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa5\x00\\\x00\n\x00P\xd9\xb9<Np\xbf\x0b\x9b\xb9\xed\xa8\x1b#\xda\xcb5\rb\x8d\xd9"\xdb$N\x9c(h\xba\x7f+\x8d[\x14B\xeegO\xbe\x8c;\xbdV\nf\xd9jA\x02C\x94V\xa5\x91"\xa6\xd8\x15\xd6/\xe1\x86c\xcf\xbd}\xc7\xa0y\x85\xb3$\xa3M\x1e\x01\xb0\xd3 =0\x1aj+2\x1b\x96=\xec\xec\x8b\xb8\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01]\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xae\x00e\x00\n\x00P\xd7\x13s\xc8V\xc1##\x90.SU\xce\xfb\xb0E\x16\x13|\xc4\xd1\xf0\xd6"\x00\xf0\xc4\x01\xa6\x98\xa4\xe8i\xf0v^$EB\xdd\xab\nSu!\xa23\xb1\xbe\xf0\x12%\xb3\x91V\xf8\x1fw\'{0\xa5T\xd1*\xf4\xa44\xbb\xc7\xe1\xb1\x83\x1f\xe1;z\x03J\xba\x87A\xbe\xd8\x19l\xfbB<\xa1\xb1\x04\x88\x96\xb8,\xf7\nn\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01^\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa8\x00_\x00\n\x00P\xd5\x05?\x95\\\x9e\xc9\xf2\x12i\xf0\xa4[A5\x15P\xdb\xb2\n\t\x1f\xbd\xbb\x19\xefHz\xa9[yz\x86W\xa6\x13\xcd\xca^[\x98\xd4I\xfbd\xb0y\xc4Qpegv9\xb2LAV\x041\xe8&\xe7\xd6\xff\x08\xf4\xd9"\x11#\xf9vq\xca &\xa1\xdb\x89O\xd5\xd70l\xdd\x92\x07;S\xd0\x8a\xad\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01_\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa4\x00[\x00\n\x00P\xd2T\xad=\x16\xcc\x03\x18\xc2\xaa'hCe#\x81r\x16<\xee\x98\t\x9d\xa9G\x8e}\x9f\xb2gO{ \xedl\x1c\x0f\xd0l\xf5@Q\x90\xbf\x9c\x1c^\xc2\xfd\x1fh\xae;\xcd\xcb{cU\xc2\xbb`\xa7\xb9\xef\xdb\xd5\xff;\xa2\xa5\xa7kr\xfezIP\x0e=\x86j\xe2P~\xa2H\x03nr\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01`\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b"\xb3\x00j\x00\n\x00P\xd0\x0b-\x93\xac\x96{\xd4\xf7\xc0\xf0\x87\x87\xd8\xe3\xa9,\xbe& \xa8\xdb\x9f\xc5,\t\x19\xd0e<\xd4<\xeec\x9e$\xaa\x1d\xb4\xdfx\xb1k0\xaa\x8av\xfbI?$c0\x9d5ZT@]tD\xcf\xf8\xab~\x7f6\x8ev\xfdZt\x89:\xbd\x1enzt^t\xb4)*+\xeeA)\xc3\x1f\xd9\x96B\xa0'\x89>`G+Q\xd7\xc1J\x01\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01a\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b"\x9e\x00U\x00\n\x00P\xd5\x1a!kr\x8d\x8f\xb5\xbb'\x8e\xd4\xaa\x1e\x9evg\x8c\xaft/H\x84\x8a\xe2|\x82\x86\xf4\x1fe\xc6_\xf6\xe9@\x1d\xc5\x9b\xdf\xb7\x19\xe7\x9a\x14=\x01\xe2\xfa\x8a[V\xee\xb6\xb6b\xffK\x8b)%-\xd3\xad\xa8\xc1\t\xf3s\xe1\xb1\x85\xed\xa4\xd79\x11\x93X\xa2\x04\x93\xf8r\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01b\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa2\x00Y\x00\n\x00P\xda\x94\x83H\xdf\xb2\xa2"\x07\xe8>\x81\xbbE\x9e\xb9O\x0eT/\xc8\xf2OaZ\x12\xe0\x8ej\x96P\x17zD\x88\x133X\x97\xbeT\xd6\xd3\x88\xe3\xf7\xae\xfd\x80\x84\xf3M\xfb\xf0t\xc6\xcf\x82\xa9\xf4\n\xb8N\x0b\xed}\xee\x8eY\x9d\xe9"V\xfb\xac\x95\xdc\x8b\xddM\x9a\xd0F&\xe6{r|\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01c\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa2\x00Y\x00\n\x00P\xdat\xf8\xe3M<\xa4:\xc4q\xc1\xe2\x00\xfc\xc7\xbfk\x8c\x13k\x0c\x96u,\xf5\x86/\xd6y\x86'\x13\x83F\xadZk\xf5\xad\xf9\xae@\xbdN\x07\\'\x14\xde\xbb\x1c?\xff=\xf0\x8b\x1c\xb4(\xc3!\xfc'&\xf0^^\xfbV\xa4\xea\xabs\xc9\xe8wr\xccb\xfe\xea`\xd9+p\xfc\x8d\x98\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01d\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x9f\x00V\x00\n\x00P\xd4\xd3\xa3\xf5s\xe4\xd9\x1f\xde&\x00\x89\t\xfe>\xd3\x82e\x81\xa1\x05\xd4p\x03\x969`%\x1d\xd7\x16\x92\xc2\xae\x8er\xea\xfc\xef\xa7t\xb2\x84\xc6\xb0%\xd4\xdd\xad\xdaDR]\xe4Jf_ =\x193\x92\x95\xd5\x08\x19\xe5\xfd+\xa8\xe9\xa6z#\x05\xa9\x19u\xaa\n\xd6\xd8\xa0O\x04\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01e\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa4\x00[\x00\n\x00P\xd6\xcb\x94\x84\x9f\\\x06\x05\xeb5\xfc#Y\x94Bj=\x9diR\xcd\x80\xde\xe0\xe5\xa5MD\x97O\xc7wn\xbd\xc4\x17\x9b}\x83^u\xb2\xb6\x9ea\xd0\x7f\xc0!t\xb7\xe2\xb6\x85\x94 \xc0\x8e\x96\x9b\xaf\xc2\xeb\xb5\x9e\x93x\x87I\xb9\r\xe8\xa4\xcb\x14y\xf4\xc8G\xc7\xd4\xaa\xf8\xa5\xe0\x1c\x02w\xb5\xa2\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01f\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xac\x00c\x00\n\x00P\xda\xe5\xa4 D'\x07\x12S\xa0\xc9\xc5\x7f\xf6X\xe7^M`\x84o\xb7 \r\xc4\xa2\xeb\xa8R4\xccn\x04\n\x81\xaa\x8e\xe3\xa8\xd0\x11?\xff\xa8\xdaNfy\x88s\xc1X\xf3\xe2v]\x19Z<\xc4\xbd\x94\x14WD\xc0\x15\xd7BF\r\x93\x10\\r\xe3\xc8A1\x1e\x8c\x90\xef\xdb\xc6\x04\xe3\x96\xc2\xb8\x03\x99\xc0\xe0\x00\xac@T\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01g\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa7\x00^\x00\n\x00P\xc0\xee\x04/_\'\xba0\x85ai\n\xccL\xa0\x9c}\r\x18\xf9~\xd7\xc3\xfe\xe3j\xdal\x9af\x9c\xac%\x83\x1c\x9e(\xbdA\x08\xa9\nZ\x9c"\xa2a\xe5\xac#`\xe2\xbe\x92\x9d:=d\x05\xdd\xf4\x11h\x7f\xd7gE\xb8|\xda\x87\x99\xf6\xc1DMO\x94B\x93Rm\x1d\x1b\x0b{\xff\x8ci\xdb*\x10\x82\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01h\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa9\x00`\x00\n\x00P\xc4 +\xf8f\xccT\xbd\xacO\x8c\x00U_\x8fh\xbb\x07\x8e\x05\xf7\xc3\x9d\xf82\xb9\rgU\xd3u\xcb\t\xb9\x03\xdf\xd5\x0c\x9ao|5\xcb\xc9P\xa2\xafm\x89\xd1\xa1z\x95\xa23I\xd8\xcd\xaaz\x8ef\x11\x97\xe2kel\xff\x91\x1f\x16\xb6$(\xd7o(]\n:\x0e\xcd\xd8Prr\xb30\xdf\xecr\x0c\xee\xa0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01i\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb1\x00h\x00\n\x00P\xc1G\x93L)\xe7\xe2_\x87Ou\xf6X8#\xbe\x0b\xa8\x02O%\xa0\xc9/P\x0b\xc5\xe2\xad\xa8\x89Osa\x90\xa2\xba\x81@s)\x1a\x1fpC\xb5\xe5\xa0gA\x1arq\xd3!\xbc\x11\xd1\xc1\xda\xfa\x9ebs\xf5\x9c\txb\xbc9Kr\xdd\x1d\xc8p0\xbb\xff\xfbY\x8c\xc4\x11\x8bj\x081\x9e%\xc7\xf3\xb7\x81A\xf3\xe3\xfeu0\xc4\x8d\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01j\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x8e\x00E\x00\n\x00P\xd3\xabG+F\xcc\x98\x02\xdcgu\xa5=\xec\xdb*\xb8\xe9i\x8f\xf2E\xb1\xa0\x85\ro"\xdf\xf1\xedS\xc29\x83\x99\x9c \xaaw&\xe4\x08\xfd\x11\xe0\x8d\x8e\x9e\xefO\xf3\x1a\xcc\xa2\xc4\x06c-\x07\xd5\xfeH\xfd\xfa(\xc4\xdf\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01l\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x95\x00L\x00\n\x00P\xd2[W\xec\xdd\xca@2\x10Cq\xac6\x1ev \xa3Re\xed\xf4\x80\xd0\xc5\x08\xd6\xd0\xde\t3<\xdb\x1aHv\xbcB\xb2\xa5F\xff^\xd6\xacd@\xe8\xbd*_\x80A\x87S\x15,\xfb\xff\xb0`\x04\xa8\xedN\xff\xd2{\x014\xeb\x16\xa3\xaa\xa5\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01m\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\x98\x00O\x00\n\x00P\xd1\xe7,\x86S\xe4b\x91\x01\x7fc\xdf\x83>qi>u\xf0\xca\xf1I\\\xb5R\x9a\x84\x13f\x14\xef\xd2\xa1c\xc38\xd3<'\xd9k\xa3w\x87k9ARp,\xbb\x91\xd3\xbb?\xf6\xea\xcd\xe5\xb7\xca\xaf\xa3$I\x9b\xb1P\x1f\xe5\x10\x07C\xeb\x85\x19*D\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01n\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xaa\x00a\x00\n\x00P\xd3\xec\x1b4m\xfaW\xf3~\x90\x8cW7\xc9V\xf5&)\x02\xdc~\xbe1\xfbl\x86\xd0\xa7\xf7\xd1W?\x8f\xc5\x042X\x88\x0bQ\xa4\xe5\x16\xbf\xf7\xee\xbf\xca\xa6\x94!\xf8\rO\x8c8a%\x89\x89\xaf\x94\t8\xef\x04\xd1\x9bU\x8do\x9bU\xc7K3\xe1\x92\xf4h\xf7+\xb7\x94]\x91P\xd4\x00\x16nF\x99\xe4.@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01o\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb7\x00n\x00\n\x00P\xdf\xa1\xfc@\xd7<~\xa3J\x1d`\xd0\xe4\x98\xf9\xde\xd9C\xd4\xa9\xc4\xbd\x1eV\xc3\x9bj\x99q\x9d\xae\x9f \xe8$]\xe9\x8fL\xdcDi\xad\x98"\'\xaf\xc8mR\xc7\xb2\xe3J\xd61\xd7\x0c\xee\xcd\x0b\xc5\xd4\xf2\xe9\x90@\x81RY\xe9\xc7\x9c\xa3\xe9\xd4\x10\xbd\x05\x12\x0fKH\xbe_\xb5\xcdUA\xb5\x9a\x19/ux\x9b\xb2\x03O\xc5>\xf7u\x01\xa9\xfe\xddv \x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01p\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xaa\x00a\x00\n\x00P\xca\xb3E3O5r4i3m\xca\xd4{\xe1SH\xa7w\xce\xb6p\xdf\r\xdfR\xcb\xb7n=\xa6\x8a\x1b>\xdb-\x1d\x94\x8c\x03;\xf7\xac%\x94\x90\xcd\xbc\x04xY)\xaf\x18\xd3c\xe7\xb0H\xd8\xd0\xc6\xbb\x15w\xbabX\x99BL\xd4\xd05~\x08\x9e\xbf\xd1\xfb\x80\xe8\xf5]\xe9{\x89\x1a\x0b\xde\xb9\xd3\x82\x8cw\x14\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01q\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa8\x00_\x00\n\x00P\xdb\xd5\x195\xd1A\xa7F\xdf4\xc0\xf9\xa1\x1e\xe6\x89\x03\x05\x19BK\xc9\x13\xcd\xc0W\x1ecc\x14$N*4\x9e\x9a\xbc\xa0U\xa1n_K'Tn\x1c1w\xac3\xb6\x19\xea\xd7\x00\x02\xb65XR\xf3\xc1\x98s\xd6\xcb{\x03F2\xd6\x1f\xceI\x93\x96\xd2\xca%u\xfd\x88X\xecL\x06\xd7\xdd\x10\x05\xd7\xb0Q\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01r\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x9b\x00R\x00\n\x00P\xdb\x02\xc7R\xe4\x96\x19G}\xb7Q\x0b\xca^7t\x12\xfdO\x1f\x1eu\x06\xeb\xcd2\xfdU\xdc\x8c\xe1\xceW41\x8f\x8a\xba\xd6\xdb\x18`,\xa1K\x8d\xe0\xc8\xc4,\xea\x824\xe3!\xf4\x7fx\x9a\xf8\xec\xdby[\xb7\x95\xb2B\xda\x1c\xc6\xba\xa7O`\x0b\xf7p\x0bA\xd0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01s\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\x9f\x00V\x00\n\x00P\xd9\xdf$\xa7\xd0\x81;Z\x89\x9au1\x1a\x9f\xcb\xf4<W\x95\xf3qu\x88\xdcv\x17\xaf\xbc\xf8\x94\xc0\xcc\xa0\xa9\x00Ey\x85\x15O\xb2\xb8Rei\xa9:}\xe7q\x98\x199j\x83\xdbR\x06\x91'C\xee\xe5\xb6\xc0\x8c\xdej\x88\xca\xc0G\xd4\x98G\xb4N\x9b\xebF\xa8_\xf3\x1f\xe0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01t\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x94\x00K\x00\n\x00P\xd9\x05\xe0=\xff}\xf6\xfeL\x97\x88H\x95%1Yb\xafK\x81\x06\xc3\xd7\xe9Uz;\xed\xd5\x0f\x08\x0fE\xf9!\x0c\xf4\x88:\x02\x12BW\xf44\x8a\xa2\xc2\x8bZ2w\x10\x95N\x89\x8bL\x96pOj\xe1T\xf0\xa4(\n\xe3EV\x93\x96\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01u\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa1\x00X\x00\n\x00P\xd8vy\x06a\x91\x1f\xbc\xc0\xbf\xf3P,L\xdc\x82\x84\xd7;\x1aA7\x8b\x04I7\r\xaf\xdfQ\xbdy\x16\xf0\xdd(G\xfe\xcb\x87\xda\xf7\xb1#\xc3\x9b\x0b\xc1\xb0\xa34\x88\x80w@RR\xe0\xb0\x0fvl\xc8)\xd5(p\x03\xf9\xbd\xbc"\xb9\xa5x\xb9\x91\xa7\x9fUk\xafD\x02(\x9b\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01v\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb8\x00o\x00\n\x00P\xd6Sx~\x12g\x94\x0e\xe6Q\xbf\xbf\x00\x17FD\xd9\xd8\x18(\x10\xc2\xcav\xb0\xb8\x074HW\\\xb6rw\x89\x14\x10)\x81p\x8fI%\xe2z\x93\xef\xb3\x02I>\xcf\xfc\x84.\xb1\x839w\xde\xde\xf8\xdb\xb4\x89\xfdz\x7f\x15p\x932\x14\xab\x0e+kt\xe6\x95*5l9\xd7y\x00\x9cp"f$\x89\xa2\xf4\x0e\xcb\n\xe4,\xabG\xcf)\xaf\x9e\xb8x\xbe\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01w\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb0\x00g\x00\n\x00P\xd2\xc3%\x8fv\x8bEf\x8bo\xf0J\x1f\t\xc2\x04*\x8b\xfc\xd1b[\x9b\xc7\xfc\xe6\xecpQ\x12\x93JS^\xa6\xcel\x18\xecL\x0f\xd8\xc5\x02\x01F\x80s0r\xd7\xe1\xb7\xf1<\xc3\xfe$\x9e=\xa5l\x14\x15D\xc4A3\r\x15vC\xd7c(\xe7\x9f\xe6\xd2T5oM\x1f\xf1\xd9\x110\xc2K>\x15)\xd2F\xcee\x9cg\xc8\xaf@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01x\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\x9e\x00U\x00\n\x00P\xd5\x98\x8a\x01\x93l\x14*5V\x1a\xed\x98\xf6\x1f\xc5zBA\x15\xb9?\xc3\x18;\xc1\xc6:ekf\x99R\n\xc4\xd2\x9eO\x8d\\\xca'zv\x12_Ys\xd0~\xad\x821\x05#\xecO<\xb1\x13\x06\x8f\x1f\xd6\xc1\xcb\xa9\\\xf0.\xf5\xe0\x0b\x8f\xf9\x97\x83\x8cLnIm?\x03\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01y\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b"\x9f\x00V\x00\n\x00P\xd3\x18\xf3\x94\x1ew\xa1\xed\xb9\x8c\xf0\\\x88\x16\xc1\x977\xbf\xcfC\x90+\xd5\x9f9\xb5\xaf[\xfd\xda\x14\x99\xe3\xcc\xc1\xda4T\x98\xe9\xfa\xfew\x05\xfd\xee\x95\xff!\x8b\x138\x1b\x7f\xab\xd7`\xc8V\xdc\xef&\xf1TT\x99\xb5\xa3\xfd\xa1N\xe6\xeanBYT\xb1Z\x10'\x88Zc \x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01z\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x9e\x00U\x00\n\x00P\xce4\xbe6\xaf4R\x83\x06\x93\xa5\nM\x11q\xc8\xdc\xa0*\x9c\xe2\xa4T\xfaB\xdc\x1e[\xdd\x0e\x9ca\x9b\xe0G\x97\xb4,\xa4\xcdZ4\x15\x11\x7f\x9e\x04\x07\x8d\xf4\xc3!R\x89\xbb\xd0\x1aj\xe1z\xbf]6#d\x9b\xabF\xe4\x99<]e\xa6O9\xd6\xb0 \xde9\xff\xfd\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01{\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9c\x00S\x00\n\x00P\xc3\xb5O^12\xb3\xef\x0bz\xb4\xb6\xacvk}\xdaQ\xf1\x01*\x0e\x9f\x0b\x05\x00\xa20\x90)\xe4\xf5\xf4\xb8\x14\xbd1\x00\x87\xc5\x9b\x0exZ\xea%\xfe\x03\xe2\xb4\xdf!i\x91\xd6\xbf\xcb\xc7,dpkQTU4&\x9d\xd9\xebL\n\xe2\xbew\xc5\n\r\x9c\xf1\x04\xa8\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01|\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\x9c\x00S\x00\n\x00P\xc3k\n\xf2\xc3\x18|\xb8\xba\x11\x8c\xa5Cu\xfe> \x948J\x96\xdf\x18O\x94\xf8\xd8\x8f2t\x03\xbd7\x9e\xf4\x15\x19\x9a\xb1X\xc1\x8cGx\xcc \xe1E\x89?r\x83`\xeaQ/'|K\x1ecJ=\x15e\xbf\xd0\xc9E]\xbbb\xa4 >\x0e\xfe|\xae\x84\xa0 \x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01}\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b"\xab\x00b\x00\n\x00P\xc4\x9f\xf3\xac\xc0^\x94%\xca\xe9+\x8e\xf3\n\xd8G{\xd9L\xf27!'L\xa6`\xbd\x079\x87\xb7m\xe1\x10\xc3\xf9;-in\x96e\xf0\xc1\xdc5,\xe7\xe8<\xfe\x06H\xd3\rV\xb4\xaaMP\x12\x86\xb2\xbe/`\x80\t$\x14Do\xc6\xfa=\xdc\x12\x91\xbf\xa3\xff0\x81\x9dF\xc4\xf6\x8d\xc0WyU\xd0\x94\xcer@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01~\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b"\xa5\x00\\\x00\n\x00P\xd3i:\x84xT\xd8\x17?\xb1\xef\xafR\xecN\x9d\xff'\r\x06\xafZ\x86\xeb Pt\xfd\xb0hhd\x18\xe7\xd4\xde\xe9\xf0\xf9\x18\xc9<\x8c\x99G\xbc\xfc\xcfQ\x13\xf7\x08\xf2\xc9BJM\x03n\xeePb\xa7\xb9@\x94\xcd_\x0b\x12/\x83\xd6\x84\xdaO\xa2\xfdS?\x168\x92l$\xc52FR\x1d\x88\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x7f\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa0\x00W\x00\n\x00P\xd5\x90j)0\xd4\xb0\xbf\x86\x8f\x88-\xe5(\x9d]\x8d%$\xe1\xb9s\xf6\xc7\xb4\t\x93i\xaeL\x9a\xe3\xd9\xb6D/\x186\x96\x7f\x8eb\xa2m\x81\r\n\xa6E}\x03\xa2"@\x0e\xea\xf6\xa5\xb1\xe0\x14\xf87\xfa\xe2\x9f\x998\xc8\xdf\xb0Q\n1\x8b\xa1\xae\xd7>\xf2\xbbucW\x1b\xd2\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x80\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb2\x00i\x00\n\x00P\xd2\xfe\xdeKC\xbf\xe5i\x9b4\xb0*\xc94\xf76\xb00A\rlReX[(\xc6\x83`B\x95/~\xc7\xe6Q\xa5\xee\x05\xffw\xa1F\x8e\x1bRa-h\x18\x93\x93/\\3sj\xab\xfc\x01\xc6\x1f_R\xa3\x91\xccYY\xd2\xddm\x92\xdf\x86;}\x8e\xd7\xe1\xe7\xd8\xe1%=\xc2\xa7\xe20\xa9\x13\x17H1\x1f\xbd\xcf\x1b\rL\xad\x0e\n0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x81\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9e\x00U\x00\n\x00P\xd0\x87V\x00u\x0bk\xd0\x84CjW`\xf7\x83|GS\xd66\x12\xd5\xc2\x11\xfd=w,\xeaw\xac\xe8h|\x0eT\xf35\xbf\x14\xa8\xfa\xf7(\x96/\xe0D\xab3\xf0I\xd6\xae\xf9\\\xb0\xd9\x1f\xae\x844\xd7\xf8\xe1\xda\xb0\xf5\x15\\\x97\xc2\xf8\x10\x1ai0r\xb5[\xfc\xcdcp\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x82\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xba\x00q\x00\n\x00P\xd2\xa8\x91\x14`\xcc\x92\x98.\xdfU/\xcaw"\x0c\x93\xc4h\x16\xf1\'\x94(r\xfe\xaf\xf7)$\x13\r9\x07\xf7p\xc9>\xbf\xb2\xc6\xe59\x16\xbc~\xd7\xda6\xd1\xcbL\x1d\xaeC\xb6\xcc\xeeJ\'\xd5\r\xe1Wm{\xd7f\xaa\xddIvPvY\xcf\x018[\xd6o\x1b\x13\x19\x1de\xacy\x14\xfe\xda\xc15Ql\xd7\xf8G\x10\x02\x98\x05\xd3\xbb*d>!\x88\x9aG\x8c\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x83\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa4\x00[\x00\n\x00P\xd7\x99z:\xf0\xbaT\xc8\xd3b\xc3}\xa5t4\xef\xc8\xe4\xd6\xabT\x02f\x1e\x1d\xcfes\xbd\x9f\\w\x0cE,83 =\xa5\x18\x00\xe1i\xb5\xe4C~\x99Q4\x95p\x87Wu\xe2\x1f\xd7\x16\xcaO\xc1\x8b\xe3\xc4rG\xa8[\xfe\x8e\x8e\x07\x9f\xf7\xc1\xdd\xc66\x12P\xa2ryqw\x13\x14\xf7\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x84\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa5\x00\\\x00\n\x00P\xda}\x18\xab\xc0\xac\x8d\xae<\x83\xee_:\x11E\xeclu\xa8x\x03\xb9eu\xb5[v{\x8f\x86\x01\xb5K\xe0j\xd9I\x94\\W\xe1L\xa3\xdd\x18\x9d\xa8\x1f\x9a\xe3\xa4\xa6\x1a\xa96=\x83Ow_U\xb9y\xa7[6\xd7Zl\xfc\xe97/NG\xc5`\tc\xf7\x7f&\x94\x00\xe7\xeb7P\xd5'\xb6\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x85\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x96\x00M\x00\n\x00P\xd6\xbcyN\xa62G\x90\xbf\xbd\x08\xef\x1f\x1a\xe5\xc4\xda\x13\x80S\xd7\x1d1\xc7\x13\xe09\x157\xea\xe2b+{4\x87a\xd8[\x07\xb5\x8a\x92\xa0Ku\xde\xab\x98\xd9nH\xe2BeS\xedVc\x83\x1ci\xb1ra{+\x83\x85M\x93_[\xddc\x84\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x86\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa1\x00X\x00\n\x00P\xd6h;\xc6\x99\x91\xc7tu\x18\xb8\xa9[}\x8b\xe2\\#&\xd1w\xd5C\x0f@FL~J\xb5\x1f\xa8%\xd5\x04wJc1.I\x95\xbdf\xf8O4\xd4j\x80\xdd\x8a1\xf7\xa4\x80\xa10\xa3)X\x83\x02BL\xe9\x9c\x8c\xf2\xd4\xc2\xdb\xb3\xdcpHg\xd4G\x8f\x99Gq\x94\xaf\xe2@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x87\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb6\x00m\x00\n\x00P\xdf45r\xb0k<b"\xe6L\x00\x18\x7f\xbbg\xe9A\xb9,\xa87-\x1f\x9a\xc7a=\x16\x85\xdf\x04"O\x1e\x17\xf4\x92\x84\xd4\xb6\x02\x87~3C8\xdf\x1c1(\x95\xa2\xda\xb8\xc85\xb9K]\x1b Wm\x80O\x04\xb9\xd4Z\x81\t\xc8O\r\x94=\xebzt\x99\xe5sph:\xac\x0b\xfa\x10\x80LY\x1d\x9dUH\x01O\x90\x8f\xbf\x1a\x86\xc4\x93K$\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x88\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa3\x00Z\x00\n\x00P\xc7k=\x03\xc1\x8b\xf50n\xd7\x88\xa3"}\xa1v\xae\xc3\xcec\x00\x18\xfe\xc1\x8c\xce\x84Q\xcb=\x9b\xc9uq\r\xcd\x80*\xbb\x03\xbd\xf0\xc1\x93,\x86-H\xf1\xbd\xe6\x8e\xde\x93\xcb\xd8PL\xb6d\x8e\xee\xdaQ\xf0l\xcb\xa7\xe9g\xecv4\xaa1R\x8dBx\xc6I\x03\x9fe\x87mu:\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x89\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9b\x00R\x00\n\x00P\xc8&\xa0\xbb\xdf\x13\xb3\x83`\x8d\xb18O#\x01\xe7\x9d\xe0(\xdd\x1b\x8cS\xaf\xe5\x1d\xca\xed\xee\xd0\x04\xb8\xc87\xf1\xff\x0e\xceQ\x92\x9b\x82\xd5d\xf0 t@\x94[E{\xd1\x15\xb0\xb2\xca\xa5\x16h\xad\xe1?\x16\xf3Y\xcfo\x03\x80\x1e\xb6e$\x8f\xc2\x8f\xfd\xf3\xda\xd2\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x8a\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xab\x00b\x00\n\x00P\xd3'4f\\\x02\x1c\xc7[\xb1\xef\r\x026\x82\x10su\xe2,\x0e`m\x12\\\xc5\xb6QB\xe1\x99^\x8f\xe6\xf3z\xed\xda\xccL\xea;o\xe1\xb5}Q\xa9\xad\x91\xfe\x98<\x86,d\xed\xa4}\xcb\xfe\xff\xaf\xc0\x0f4\xafi\xfa\xc5\x0c\xc9G$r\xfc\xba\xa5F\xdcW\xc9\xa5\xe86.\x90\xe1~x\xef`\xa5c\x1a\xb9\x86\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x8b\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x9d\x00T\x00\n\x00P\xd2\x93\xd3/4X~\x1bA\xa7\x7f\x12\x92\x8fX\xd5B\xa6\xd5\xb0\xed\x7f\xcb\xaa\xb6\xa3\xb6\xcf\xc6FVMc\xc4\xf6\x08\xdd:,\xa1,\xb9\xaf\x12l\x8a\xa4\xa5\x92\xc6\x9bT\x98H\x89\x12l\x04\xcdOF\x96\xbd\x8f\xe155\xc8\xca\xffC\xbf\xd9\xbeG\xcaA\xc1\xdb7\xd9\xaa$\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x8c\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9c\x00S\x00\n\x00P\xcf\xbb\x19\xab6\xdc,\xda\x1f\xff\x18/\x9cb\xef\xd9]\xc7F\xab\xc5!\x1a?\xbd\x06\xc9\x87\x9f\x1c\xb5\x00YW\x83\xa5\xe3\x0e\xda\xfdY\x8a\xe3\x02\xa2\xd3uQF\x87\x81Nb\xc84\x8b\x1b\xbf\x91\x92\x0cs\x1cn\x8f\xf7.-\x1a9"\x845j\xadC\xc0\xd3\xd2\xe4+\xd0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x8d\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa4\x00[\x00\n\x00P\xc4\n\xe9\x98^\xd7\xddS\x0b\xcb\xe1\x91|u\xa72L\xfd4\xa5C\x06I\xb9\x87\x03\xdauz{N\xf6n\xe9\xa1\xe4\xaa\x86E\x8c{!\xa8E\xefG\xff\xdd\x1b\xe7\x8f\x8b\x1f\x18f|$\xea\x83\x8d\xb9\x9c\x07\xaf=\xb3\xf9\tA\x8a\xd8H\x90y\xe4.\x1cj\xbb\x9dF\xe7O\xbc\x991R<\x8f\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x8e\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb0\x00g\x00\n\x00P\xc6\xfb\xbf\xf3\x08\x83\xedT\xe3\xa7D\x81/\xa9\x01\xa45\x0e\xf7\x84"\xb1\xa8\x0f)\x8e[n6\xc7[1n}\xb1\x7f\'9\x0f\xc9\xbf\xe2tH\xfd\xecl\x14J%\xc7@\x93\x97\xd6!\xdd\xb2\t[\x90>9\x05\xf0.\xe0>x\x7f"d\x0b\x8a\x13D\xb82GX\xc0\xb3\xdao\x15\xdd\xd1\x1b\x80}\xee\xd2\xafV\x06\xa4D\x85\xcd!\xe9\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x8f\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa5\x00\\\x00\n\x00P\xda\xb0pwPlZ=\xda\x15\xb0\x88J\xac\x9f\x19E\xc2\xc9\x1c\xa7WI<\x82\xde\x17\x03\x06\xd7S\x9b\xea\x83\xba\xa0\x9e;\x18\xe6r*B\xaaF\xf4\x87x\xe4\x9cDO\x19!\xa9\x8b]c\xdf-\x96\r\xc9\x1b>I\xb3&\xa9\x9an\xbc\xa3_Ca\xb0T\xe5\xcc.B:\xf0&E5e4\xa4\x88\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x90\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x8f\x00F\x00\n\x00P\xda\xfe|\xea\xa1l\xe7\xcf\xd7\xd6\xcaITu\x8f\xf1\xb7\xf1[\x96\x91\xc3`.\x97\xe50\n~\x92\xa7\x8d\x8edY\xd5E\xcb\xc6Tj\xf9p\x99\x91\xbaf\x18OH)\xbe-\x00W\xfc\xdf\n\x88\x189\xd0\x19\xd1\xd0\xf0\x9dd \x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x91\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x99\x00P\x00\n\x00P\xd7\xdfR\xa90\x81N\x11!:b\xea\xbf\xc52\xf2z\xb6\xe3\xc08\x8e-C\x1d\x8c\xf6\x87\x13\xd3.\xd1F\xea\x88%`w!\x93\xb9\x14\xd2\xb9]o\x84\xdf\x86\xc0\xf7F}\xa0F\xb3["\xde[\xfd\xa0K\xd6\xfb/\xbb9\xeaw\xccpS\xc7!\xb3\xd2\xbd\x84\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x92\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb6\x00m\x00\n\x00P\xdf\x17\xba\x1aA\xd8\xa3\x11\x9d\xf7e\x9b\x14M\xa2\xb0\x1e:\xd6\xd5\xe4\xfffl~\xc9\xc4\xd2n\xd33\xa5 \x9a\xe2\x17\x1d\xe0@\xe1\xedp\xde\xfc\x17\xba\x9a\x95E\x0c\xa9M\xb9T\xbb+\x8b\x12\xad\xd5\x06\xae\x06\x80\xac\xcd]G\xb1"\xdf\xeb\x14\x95\x9c\xa2\x19\xb7\xe8al\x8c|\x03\x7f\xfaP\x89!\x96S\xffO\xc3\xec-+\xa9\xaab7+\x0e\xce\r\x90\x8f\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x93\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa6\x00]\x00\n\x00P\xc9\xe3G'\xd1\xa7\x14\xad2\x81\x86z>6\x90\xc8\xad\xcd\xd5\x04\xe5\xe0\xac\x07j\x8a\x00\x9cI0\x8b\xa7\x1c\xc2\xe0\xb1p\xed\x9e\xe5\xa3\x0c\x17\x18\x06\xce\x14\xf6\xb2\xf2GJ\xcf\x8c^\x98\xde\x10\r8\xa7\xder5\xf7\xcd'Pd\x19V\x06\xbcL^\xed1\xd5\x81\x95\x80\xf7\xc4e\x01\xd8\xa6\xacr.;\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x94\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x9d\x00T\x00\n\x00P\xca\xb2\xabfp5O\x96\xd26\xb7\x1a0\xe4\x19\xc1\x1aKN\xe1p\xbc)\x0fC\xbd\xf5\xd2\xa2\x14a\x0f\x81\x07\xc5\x81{\xe2=\x88\xc5\xb7\xeex\xe5\xf92\x1eh\xbfJ\xf5\xb7\xea|\xe5\x07\x91\xe6\xe4\x08\xffn8\xa3(H\\c\x02\x9b\x90\x11\xb0\x87z\xdd\x0c\xcf\x0eu\x14\x90\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x95\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9e\x00U\x00\n\x00P\xca\xba\x86\xb4\xb9X\x8a\xa5\xf1\xba\x83\xe7+9\x94f\x0f\x0e\xfcC\xa3!3\xeb7V\xca\x0b\xc2\xad\\-\'=\x82B\xa2"\xff"\x0e\xc7\x85\x92\x13\xad\x9a\x1dJ\xd9N:5\xac\x97\xeav\x15Q\xcb(\x83rLM\xee\xc3\r\xeao\x9a\x9e\x0bQB\x9eIH\xb6\xc0+\xae\xae\x8c\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x96\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa0\x00W\x00\n\x00P\xc8\x1d\x1f\xf4\x02\xa6|\xf8\xb2\xc7O}\x86\xd3@j\xe8\x17\xce\xc2"\x8e75\x05\xefu\xcd\xe2\xfb\xf0\x1d|\x11oe}\x99r\xa3\xd6\xef\xe3B\xf4\xcb\x82\x02\x80\xb2\x9f\xdd\xc9r#\x1e\x0e\xdaC\xdc\'\xa6\xf3\xd8\x81B\x04\xc63\\)j\xe0\xae&A!\x95m\x88^\xec\x8cv\xd9 \x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x97\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa0\x00W\x00\n\x00P\xc1\xe4\xcf\x8c\x81\xcf\xdf\xef\x08r\xba`\xe1\xda\xf4\xc2\x1306\xff\x82\x9a1\xc0\x1a\xd0\x02&>\xb6\xde\xb1vkJ\xefld&vO\xf9<\xac\x8b\xdfN\x0b@\x08\xf8\x90\xf9Ho\x0b<\xa4\x8a\x94\x7f\xf4\x88K;\xbb!\xd3go\x8c\x82*8\x9b\n\x8eC\xe7\x83\tP6['\x87\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x98\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa5\x00\\\x00\n\x00P\xda#U\xd8#\xda1\x18\xbb\xc8\x9c)\x1c\xdf\xccU\xc1\xdbW\xf2:\xb7\x14\x9d?a,\xc07g\xea^\xd13Uy\xe8\xcf\xa0\x82N\xc2"\xbc7k\xbeX\xe0E\x02\x92\xc1\x9fY\xd9#\x04*+p4E\xf2Y\\\xee]\xd1\xa7\xab\x1b\xe4\xdeQ\x02Qb"\xa0X\x93g\xcf\xf1<C\xccN@ \x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x99\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa1\x00X\x00\n\x00P\xd9\xd1\x8cyi\xc5veZ\xa9\xc3\x9e\xed\x8dV\xc8\xec\xc9\x8e{\xbbG\xd41\xfd\xd8>\xecjgM\xf4\x9b\xebn)O\x91\xea\x11\x95\xf4\xbaW\x95z\xeb\xfc\xefaD\x03\xfe"\x07\xc3\xf7\xc2\x9fw\n$[\xa8V\n\xe7\x80\xca\xa6\x12\xf5\xcb1\x8a\xc0b\x85\x9a\x1a\xa3Q\xd6se\xca\x90\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x9a\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa5\x00\\\x00\n\x00P\xd7\x05\xfb\x84\xda\x0fG/f\x7f\x1d\xe5MMx\x9d\x1f\xc6\xb0\xfe\xe1$6;\xb2\x04\xba\xebaniI\xb6\x0crow\xe6\xe4V\xa9\x92\xfb\xe4\xef\x06\xd2\xcc \xb7]K\xd0\x8c\xc8se\x8d\x95\x0eNi\x95\x18\x03\xae\xfb\xb5\x1a{\x95:\xadr'\x02\xa90\xcc\x85\x806v\xb5\xd5p\x05\xa8L\x8f\x9f\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x9b\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa7\x00^\x00\n\x00P\xdf\x13\x0e\xe4\x9b\x11\x05M\xbdRL\xc4\xda\xca\xa9\xc4\x84\xdb\x89\xa3\x98p\xc4U\xb7\x1aR\xf1\xe7!~q\x0e#V%\x1c\xb5>^\xb6\x9eb\r\xb8\xfc\x81\xbeq/\xac\x8f\x1b\xe4bu/\xa1Y\xf4$v\x9a\xaa\x07E\xdc\xab\xeaC\xb7\xbcF>\xcc3C\x1a\xa4\xb4\x84\x9a\xc7\x9c\x8e0\xbbYR\xef\x91o\xe0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x9c\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9f\x00V\x00\n\x00P\xd6\xcd\x1d)\xcb\xc2\xa0\xfd\xa9y\xa2\xd0w\xcc\xaf\xf5\xbf\xd9\xe5\xac>\x96\x84\xe5\xa7?\xd1v\xa5W7\xf8\x119\xef\xb21\x13\xb16 \xd5\xcej\x0fL\x94gz\x7fy`\x13 Wk\xa1\xbd2\xfeZa\x7f\xa2*\xe5J\xcb\x81\xd8\xf9\x1d_\x8dMXw\x1e\xe7-?\x93\xe6O\x88\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x9d\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\x86\x00=\x00\n\x00P\xd9\xdf&\xd2\xa9<\x02'\xe3\xa0\xbc\x0e\xaea\xe8\xf3e\xc9\x92\xfc\xbcQ}l\xe0\\\r\xa8\xc4a.\x16de\xd3\x81\xd1#\xc5\x7f\x86\xecQp\x11\xa8\xb8\xacP\x95Sg:\x89\x07$\x92l)\x17\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x9e\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x8d\x00D\x00\n\x00P\xd8\xc6V\xe32<`\xd0\xe03\xfcx\x8d\xfbi\x18\x88\xf9\x96\xb6\x81\x93J\xfa\xe159\xd1\x95SP\x82\xc0\x87\x17\x8azm5*e4\xec\xcek\xf85\x17\xa8$\xe1n\xf8\xfd\x1c*\xc5\x18\xec\xd7&Eu@\x9e\xbe\xa2\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\x9f\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x95\x00L\x00\n\x00P\xd5\xa0\x075\xc5\xf7+~\x18\xd9a\x8f\xbf!x\xf3\xe5\xe8Y\xa6\x1f\xe9\x8d\x99z\x1f\xa2\x9a\xcf\xc0>]\x04m\xf9\xd8\x16%\x18\n\x16\xb5S\x01\xcb\xeb\xe1\x14\xd2w\xc4\x07\x8e\xc9\x9a4X\xd7h\xd1\x8cR\xc6Y\x84\xe84\xdc!c\x93\x86\x00\t\x1b\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa0\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa0\x00W\x00\n\x00P\xd1(\x92_5\x87\x9f\xacy9\xd0]\xc9\xd6\xf9\x1aF\rz\x1a_y\xe6\x047\xb5y\xc5U\x99\x92\x04X\x94q\xbd%@\xa4\xe37\xb9\x8e\x15\xa5\xe6@\x8d1\xe2y\xe8\x01\x1f\xa3\xb7^\xed\xba\xe1\x91q\xe5/\xaf.6\xc8\x04\xb0N\xd1\r\x16\x97\xde\xbe/\xdf\xff\x0b\xc8T'B\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa1\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa2\x00Y\x00\n\x00P\xceM88\xdd\xcb\xd6}\x9a\x9f5\xf62\x16\xf5&\x1d\xaeJ\xbd\x94\xb5T\xaclc\x01+\x0ctD\xc9\xd5\xd0\x99$5\xfd\xd1\xa1\xb6\x00\x90\xef0L\xc2\r\x9c\xa6/\xad\x16\x8c4\x7f\xea\xbc\x12\xbb\xde\xa8E\xb3\xfd\xa4<H\xd2]Wz\x85\xa17\xa4:\x05\x0f#\xd2\x7f\x05\x13\x08_\xd2\x98\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa2\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x91\x00H\x00\n\x00P\xcdM\xce\xdf\xdb\x0fY\x1b\n\xfeR\x8f\xcb@\xb4u^I\xa1\xb9\xaf\xe9L(\xe1z\xa4\x1e%\xb9\x90 \xd9\xd1"\xad\x96\x0fFU\x8b\x01\xe4\x90\x98\x97z\xd3\xee\x15ed\x80GSl\x87:Y\x8f\xdf\xf5\x90 \xd8[\xe9\xf6r\xc1V\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa3\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
],
'burst': [
b'd\x00\x1b\x00\n\x00P\x04R5\xb6AU\x16m\x00\xa3\xa7d?tP\xbbk\xa3sPe\xf3I}4\xae\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa4\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'i\x00 \x00\n\x00P\x04R-t\xb2+u\xac\xb2I\xfe\xd3_\x03\xec.4\xa1\x8f\xa4\xa1\xe9\xc4\x9f\xefB{>\xcc\xe9\xd3\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa5\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"c\x00\x1a\x00\n\x00P\x04^'\x08\xd9aJ};$%Pl\x810C\x9d4\xd3\x881\xee\xfa\x9a\x9a\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa6\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'`\x00\x17\x00\n\x00P\x04WG\x84mFS\x11\x92a\xb9\x16\x122\xdc\xab\xccm\x9e\xfa*\xaa\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa7\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'a\x00\x18\x00\n\x00P\x04WB!h\xc5\xe7p\x17\x03\xe3\x88g\xb0\xdf\xdf\xd0\xe4\xdd\x17N\x93\xc4\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa8\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'_\x00\x16\x00\n\x00P\x03\xe4B2/\xe0-\xf5\xf0\x94\x07\xea\xfe\xf7"\xc6\n\x08s%\xfc\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xa9\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'a\x00\x18\x00\n\x00P\x04W\xb1Y\x89\xd9eQ\x0b\xc1\x80\x85\xc6\xdaq\xbf\xf6\xdf9\xd9\x9a7\xa0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xaa\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"`\x00\x17\x00\n\x00P\x04W0d\x8f\xaa\xee(\xedf\xaf\xee\x00\xdc'q\x19\xa4\x08\x9f\xad\xb0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xab\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'd\x00\x1b\x00\n\x00P\x04W\x8f\x05\x14\x97\x8fp\t\xfd\xc5\x92\xd4g\x7f\xa8\xa1\x05\x16\xac8=\xc8/\xfb\xad\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xac\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'a\x00\x18\x00\n\x00P\x04R-\x15\xccF\xf4\x04\x8f\xedC\xe6\xb2\xd5Y\xdf\xc5j\x04S\x0cs\x84\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xad\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'x\x00/\x00\n\x00PDb\x9f\xb30\xa7J\xfem\xed\xd2P\x92\xa1\x07B)ZiM)\xb19\x89n\xcb\x16C\xe0\xa3\x17\xcc\x16\x82\xa1\x14T\xa3\x88V\x1e\xbe1Gf\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xae\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa9\x00`\x00\n\x00P\xc3\re\xa2w\x03\x88|\xf7R\xe3\x14R\xf0\xacG\x7f\t\x18d<S\xc2\xa8\x1d\x10\xbb\xd8\xfe\x7f\xea\x01\xe5>\xcc\x1aP\x90\xee\xb21\x07\x8e\xa5\x9a(\xa4Xx\xa9s#\xc2\xb3ee\xf7L9\xa4x\xd0:\x18nU\x89r\x0b\xd2q\x9a\xcd\xc9\xb0\x89v"HJ\'\x8e\xff\xe23\xfd\xdf\xddJ=\x99\x15I\x88`\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xaf\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa7\x00^\x00\n\x00P\xd9\x9a\xfc\xa1\x1f+\xa3PgV\xff\xf9\xdcaS\x96g\xa4\xea\xc5,\xd1\xbbX_\x84\xf7\x1eS\xf6f\xe7\n\r\xba\xcb\xeb\x03\xeb\x98/S\xae+\xd8\x87\x1c\xf9\xad\xe3/\x03\xb5?%\xc0\xd6\x13\x9c\xbb\x9a\xdb\x86\x13\xad\x97\xfa8\x99\x9ba\xdd\x812t\xf4L\x95\x08S\x95rot\xda\xa2\x13>\xd1\xc24<<\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb1\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\x9a\x00Q\x00\n\x00P\xda\x00\x8e\x02\x10t\xd7`\x8aU\xe9vI\xd86\x10J\xf7\xf4\xa4\xca`\xde5\x0f\xfd\xcf\x1b5\x99\xf9\xf1F\xad3\xbeJ\xb6'D\xad0\x8f\x10\xf9\x15w\xef\x1b\x83C\xa6Od\xea\x8c\xd1\t,O\xb0A\xaeH\xad\x04\x00\x12\xa7\xf5\xe1mUP\xfd9\xa1\r\xb8\x90\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb2\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xb7\x00n\x00\n\x00P\xda\xed\xf9s\x7f\xec\xd9\xcd\x81v\xd2\xff\xd0B\xad\xc3\x83\x14\x9e\xfc\xf7\x97h\xdc|\xaa\xd2p\xa0\xc6\xb8o\x82\xe5e\xe0\xfd\x87.\xe9.;_*^R\x9e\xa0#\x02\xbaM\x90\x90U\xb2\xb8J\xe1\xa8\xba\xdb\xc5\x9c\xe1\x03G\xa6\xe2]\x14\x18%\xd6\x99\xe2\xa8\xc5\xf0yf\xee\xb5\xd3\nc\xf0\xaa\x9a`\xf2IC\x05\xb0v\n\x81\xcf\xa3\xcd\xc9\xc3/\xd9\x9b\x00\xff\xf0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb3\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xb1\x00h\x00\n\x00P\xdf\xee\x18\xf6k\xa9\x80\x11Yl<\xe1\xdf5w\x1eb\xbb\xa1\x9fj\xb2\xa42\xdfs\x7f=\x8c\xeb\xcc\x034d\xf9\r\xa6\x8dbT,0\xbci@r<\xbe\xa18\xbc\xfc`\xbd\x1e\xc4\xc5\xb9\xf8\xff\xa9O\xa6L\xc8\x05\x94\xc8\xda?\xcc\xcd\x82~=\xaef\xc6\xd3\x9b\xad\x01\x1d\xb8\xbex'r#\xcd\x139\xdc\x1d\xc6\xc0ejS2\xbc\xc9`\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb4\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b"\xb0\x00g\x00\n\x00P\xcbv*\x04\xcc\xf0\x06\x1d\xecQ\xc9x\xfbL\xec\xcc8R\x84BT\x070?\x15\xa4\xd7sU\xbbO\xb2\x1a\xb6\xff\xab\xa1\xd2@_\x92\xf5\xc2\xf4R\xf8\r\rR7]\x12\xd0\x85\xdf,C<\x84'M\x9d>+\x8f\x0bv\x9c4\x89\x15\n\xb7\x12\xae+\x10J\xa3\xf6$\x80%\xc8Mq\xa5\xd7dY\x15\xfd\xd2j`=\xd3d\xad\x06}\xb8\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb5\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b"\xa4\x00[\x00\n\x00P\xda\xe8\xe9e\x156\xd1\x053\x08\x9d\xfcl\xee\xc0\x010\xdfVO\x92\xd6\x98/\x03\xa2\x0f\r\x1c_\xafG\xe5Q\x8e[r\xcd7!C\n5\xad\xb8;'Yk\xb9\x13+\r^\xe0\xc0$)\xfcc\xb0\xa2,E\x936j$np\xde\x11H\xc8\xa1\xc5\xadoy\x96:\xcc\xf0\xe6\xd6D\x04\n\xdd\xe8\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb6\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b"\xaa\x00a\x00\n\x00P\xd70\x89\xf0\xbb\x85'\x10\xe02St\x17\xc8-\xd6x\xf3t\x9f\xfb\xac\x03~\x7f\x85V\xe2\xa4\xbc^=\xfe\x94\x1eo8\x03\x8cqY\xa4\xe8t3DaS \x1d\xdf\xcb0\x81\x8c\x0b\x86\xae\xec\xde\x9b|\x81\xcc\xfcL\xab6\x80\xf1k%\xbf\x95\xe7\xcb\x00\xfbT['\x85\x0f\xb4\xaet_\xccJ\xfbh\xaa\x88^\xf1\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb7\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x9b\x00R\x00\n\x00P\xda$\xfa\xf9\xab\xa2\x84Px\x0f]\xd3\xc3\xd1\xf9\x14\x84\xa4P\x0cV\xa1jY\xc9#\xa7\x9c\x80\xe8\x9a\xd2\xa6\xa0\x9f\xd0\xe5>\x16{\x9e\xb7-\x17\xaaOIM,\x04\xe5}\xd2b\xbd$\xe1\xef\x18\xfd\xfa1~\xa4\xa2\x84eI\x9b\x91\x8eH\xe48\xed\x05\x92.\xf9\x8e\xd5\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb8\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa0\x00W\x00\n\x00P\xdab\xf0\xc3\xa0!\xe6\x16(?q]\xf4`\'\xcb\xfe:\x9a\xdc\xb7eg{?}~-F\xff\xe6F\xc6i+Qy\x97l\x11\xd0\xb8CXH\xce\x884V\x03}\xba"\xbf(\x1f\x17\xa5N\xcet\xc3\xa0\x8d\x87\xf6\x14\\C$\x9a)\xf2,!IP\xe9)WP\x86\xb4&\xa9\xf4\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xb9\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xb0\x00g\x00\n\x00P\xd3\xa9\x81$\xb3%+\x19\x82\x02'Dk\xf1\xeaA\xc9\x10\xa3Y#\xa2 \xffpv9V\x1f\xd4mC\x10\x0c\xd0\x82\xd4`\xf8\xc6.\xacI\xf2\xc9\xe8\xe0C\x8d\x91\xf4\xbb\xe4\xb1\xf6\xe5\xd6\x81\x01BA\x88Q\xad\xbbO\xce\r|\xec\r\r,\x10\x91r\xe6Q\xf7\x11\xf5\x90\xc9\x87\xd8\x06\x82\x01&\xde\x05\x85\xfe[\x86\xa9\x99O\x0b\x0c&\x90\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xba\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa6\x00]\x00\n\x00P\xd7\x9d\xde\nl\xc9\x7fO\xe9\xad\x8a"\x16\xd3\xca\xd7\xf3W\x8f\xb0\xf7|\xf1\xee.\xfd\x9dT?U]|\xcd\xde\xb3\r\xd5\x9dU`\x88\xe4\x90D\x12\xd1\xe9.x\n\xbb\x0eB\xd7Re\xb0\xc7\x8f:\xb0\xa5\xb2hQ5!\xfc\x1d\x12{\x8a\x8d\r\x04\xcf\xb4\xfeMS\x15TS\x8c\xdc\x8d`_\xfbh\xc4r\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xbb\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x92\x00I\x00\n\x00P\xda\x8c\x0f\xfdU\xe9\xc0\xdf8\xeb\x80\xb6}n\xe8G\xf0\xf1ve\xa20\x1d\xc6p\xfe\xc3n\xac\xed=\xc3\xe1\x89\x8c\xa6\x17\xf8^\xf5.L\xdf\t\x0fA\xb5\tW\xd2\x83\x861\xb0\xbb1\x10\x00t\x8c\x87\x18\xfb7\xb7p=}\x00\xc6b\x18\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xbc\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa1\x00X\x00\n\x00P\xd8\xbe`\xf9\x01\xef\x81\x12\x04\xc3\xcb\x98\x87\xdbp?^\xd3\xf3A\xa4sH&\xc5\xbb\xccT\x92\xc7\x87\x0c,\x9f\x03\x1e\xba\xc8\x84F\xcd\xe8o`\xa4?\xa7K\xc0\xb9\xa3cx\xb1\xdfu\n\xc8\xd4j\x13\xb06\xa3;\xfe\x0e\xc8\x196G\\\xb1\xeeH\x10\xd2N\x1eoY\x0ck\xda\x14\xa9\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xbd\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xaa\x00a\x00\n\x00P\xd1\xb4F;\xaa\xfdO\xdc*\xe97#e\xc5@\xce\xc7\xb8\xf9\x84\x88\x00\x98?\xe6(\x97g\x9f\xf3h\x85\x0b5\n^-3\xf2\x8c\xb7m\xfc\xb46U\x9d\x06\xd92N\x06\t\x9e\xb0\x18\xac\x10\x12S\xe8\xc2\xf5C\x1ey\xdd\xd2\xecE\xf5\xaf"\xae\xc0rN+\xaa\x04\x11A\xbc\x95\xdf\xb2\xc8\xc6|#\xa7:\x89\xa9\x12`\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xbe\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa5\x00\\\x00\n\x00P\xceE\x08B\x83\x1a\x96o\xce\x94f\xa4\xcc\x82\xd2SA\xdeS;S\r\x9e\x10\x1cB\xc1\x12\xdf;\xd4\x83\x15b\x92\ra\x15\xb5\x89c}\xc1j\x1d\xee\xe83\xbb\\!\x9c8\x14\x8d\x9e\xc8\xbe\xb1\xbd\x9e.\xd6O\xd8(\x08\xc6\x12t6\xcb\xd0\xc9I\x16fu%}H\x92J\x90n>1=\xf6~\xa4\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xbf\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa6\x00]\x00\n\x00P\xcd\x98\xd7\t2\xe9\x0ec\x0c\xef\x86\xec\xae\x86j\xf4Q\x85D\x0f\xac\x08\xa3\x1apl(\xeb?\xec^j;\xe2\xac\xc9\x8br\x9a\xed{\xba\x9e"\x98\xd7\xbd\xc3d\xc6#\xd4\xae%\xa7\xc5l\xec6<\x84\xe8\x80!\xff\x1a\xe3J\xad\x11W\xd3\n\xed\r\xc4h\xf0\x84S\xa8\xde\xa8\xe9\xa4\xff\xbd\x87;\x0b\xcf\xdc\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc0\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xab\x00b\x00\n\x00P\xd0;\xc1c'.Yz\x93\xc1\x8a\x14\x9b\xb3\r\xfb\x17\xe1\x03\xcc\x88\x08\x1b!cY\xac\x1f\xca\x0e\xf9dP\xb9\xd7b\x13O\\PM\x9d\xcf\xf7\x8e\n\xb4\x8ba/\xd5\xb6B\x05\xcc\x84i\x8dT\xec\xa3{\xbbH\xcd\xd8\xab\xd0zh\x9f\x03\x1c,\xc3\x9fW\xdfQ)O\xbc\xcfjiI%\xe97f\x88\xc8\xfc\x13J4\x9a\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc1\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\x93\x00J\x00\n\x00P\xd5\x9f\x88\x13\xffr\xb4w\x05\xc2\xbfApJ\x11\xcd\x910$\xcc\xec\x9f#\xae0\x12BD\x05\xaa-\xb9\x9f\xf4\xe9~\x88\x10\xe9\xd7\xf2\x9c]\x07\x0f\xdaA\xcd\x03)ZrV\xdc\xf8\x94\xd2,\xf7w\xce\xd9v\x8d\xfe\xf7\xf7\xe5#\xb25\xca\x1d\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc2\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9b\x00R\x00\n\x00P\xd4\xb2\xd5;\xa7\x19wnp\x86\xf5\xbd\x8f\xf5\xbbu\xd0M\xc6\xe4Lv\xa2F\xb79\x10p-!C\xc0\x8bJ\x1a2S1\x1bf\x81\xd6\x81\xa2AX\x90?\xfa\x82q\xf3R\x0fn\x8c\xf7-\xed*\xa4]\xc4\xf4\xc6E\x90\xd2PZ%\x89\x88t\x0e\xc3j\x98,\xb9J\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc3\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x97\x00N\x00\n\x00P\xd2\x93\x82\xdb^\x1c9\xb4\x1d.\x8e_\x90F\xbd\x06\xe0\xf9Bh\xfe\x9c\xf9\xf6\x84bG\xb6\x96\xb5\x11\xf19\x1d\xc3\xb3\x08\x85dz}l\x15N#1|\x04\x01\x91\xaf>\x1a5\x18\xd5s\xdd\x01o\x9e\xc4\x99\xd8,\xdc\xaf/bvGsq\xe7\x8d0\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc4\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xad\x00d\x00\n\x00P\xd1\xe76\xf5\xe7\x04\xcbt\x01[\x1e\x90\x14\xce\xceoi\x98E\xf0:\x18\xf1\r\xffy\xc04\x0c5I\x18\xe4\x9a\xc4\xa8\x90\xb5xp,\x9a2\xe1\xab\x1c\x01\x18,\\Q\x1f,\x0c\xfbQ\x18\xe5\xd5v\x018hq\xff]\xf7\xfa\xedE\x07\x95\xd9v=^(\x84\xad\x896\xb9\x8d\x8d\xd5\x92\xee\xd5\xa3\x96n\xf5\x01\xef\xf3Z\xfd\xe7\\\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc5\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb9\x00p\x00\n\x00P\xc8\xd7\x82^\xfb\xd0\x8fBp\xb7\\\x9b\x91e\xec5\xb5y\x8a\t\xf9\xb8\x98\xa3\xea{\xef\x0f?\rN\x1a?\x83\x8c\xbb\x1d\x17\xd6\xb9\x1b\xb6\xe9\xf3$\x8a\xd0\xd3\x13\xd2Z\xabK,\xda\x12S!5e\\K4\xb7\xdat-\xcf\xb2k\xa4\xad\x97\xdbS]\x12;@\x8d(\x0f\xe1\xc2\x8f\x05\x9b\xe7t\x19PR\xcb\xc7\x1e\xf8\xe5\x17*\xa8\xa8:\x92M\xaeX\xceU\xc6\x06\x98\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc6\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa5\x00\\\x00\n\x00P\xcbU\xc3\xa8kQ\xe8J6\x90**\x0e"X\xd6\xc8\x99\x83\xd5\xd3\xf4r\x16\x91\xe1h\x1c;\x00\xfb-z1\xd0\x0e-\xcb\x00^\x95\xadr\xed\xa9\x0e\x94\x1e9\x94\x19\x17K\x9cY\xe8\xe4\xcb\x10\xf1\xb7\xb5V\xefD\xec\xa51u\xf4\xe8\xb2\xbb\x17\xba+\xe6/\xd3\x12"\x13\xd0\xbf\xb3?+\x0c2\x7f\x0c\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc7\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\x9f\x00V\x00\n\x00P\xcbsV\xcb.\xb0X\n>\xf2\x0f\x0b\x9c\xb9\xe2\xc3\xc9\xe2\x04\x84a\xa9\xe1\x08\x04\x1c\x97T\xce\x83M\x88\xa5]\x1d\x81!\x10\x0e\x11\x99\xe0\xe7,\xa7\x10\xc7\xf6\xf3)Q\x00@3\xe0\xe0\x9e\t\xd8\xb6\xc9rx'\x986i#Z\xd1`g\xc2\x9c\x1b\xb3\x12\xa6N\x10\xfd<y7`\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc8\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa9\x00`\x00\n\x00P\xc8f\xbc\x1c\x16{H\xe0\xac\xecu\xf3o.\x9b\x02X\xc2\xb5\xea\xa3\xdd}\x02\xb3*5\x9a\xccl\x11\x0b\x06\xb17n\x826\x86\xf4\xf0\x1b\x11|\x06@\xeb\xa2&I\x8eal\xfa\xf5>\xdb\x1b@\xd8\xa4\xcdgs\xbf\x85\xe4F=\xbe\x8eGFI\x0b\t\xdb\xb4\x83\xdd;L;\xd4^\x97@\xba\xae\x91{A\x9a7\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xc9\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa9\x00`\x00\n\x00P\xc2;+\xcf\xf26q\xc7\xd3\x0c\x1e\xb8\x8b\xe9?\x963?\x00\xdec\xacX\xb6\x90\xfa2\x8b\xd1\'\xc1-N*\xc3\x1e\xab#\'\xb1KGES\x1f.Z\xb1\xb3;?\xeb\x1d"\x9a\xee@\xc8Z\'\x8b\xd5v\xdd\xa4\x08E\xe9R\x1by6\x83\x0e\xda\x1c\x83\xfc\xb10\x96\x9bQJT)\xf3\x111\xd5\xb7\xc1\x84\x99\x1a\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xca\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xad\x00d\x00\n\x00P\xc1\xe4w\xe4\x84\xcbBnz{\xf1\r\xa5( \x97\xa1\xa8l\xed\xf0\xb6\xcc\xcdw\xc4\\\xa0\xdd\xd0\xeaUf\xac\x8c\x88H\xda\xec%}\n%\xfe\x1c\xcf\xccV\xce\xec\xb6\xfaj\xf4\x9a\x04\xac\xd6 w\xe8t\xbb\xcbL:\xbd\nq\rjT\xfb\x0f\xa0^zL\xd2\xbaF~.\rm\x18\x92\xf8)\x00\xc9LK\xef<(\xe2\x86\xd0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xcb\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa3\x00Z\x00\n\x00P\xc7=\xc9\x052 8\x9f\xf8Fk{K\xf3(\xfc4\x11:\xe8v\x96I\xe2\xdb\x16\xca\x04\xca\xf3+\xc6ol\x9d\xe0\xbf\x18\x07\x9c\xf6R\xfa\x17Ye\x83\xcb\xd1\xe8{\xc9<\x96\x90X5\xdd\xfa\xffjS\xf5\xe9\x83!\x1f.\xb99\xd7\xaa\xa7O\x980\xabz\x06\xfc{\xdb\x18\xe83K\xf9\x11\x90\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xcc\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa7\x00^\x00\n\x00P\xc5\xa8\x8b\x83g\xee\x1f\xec<\xf7\xb4\x0b\xd0\xff-\xbbF@\xf2\xdet\xbb\xe7\x05\x8e\xddz\xd6\x94\xd2\xb1$\xfe\x8fY.y\xd1%\x96\x01\xb79\x9a\x17\xc4\xb299\x9b\xfb\x8f\x94\xafg\xc9\xd9\xfb\xbeC\xeb\x13\xbc\x82\xca\x1e\xdf\xf8\xd3*\x14\x91Z\xeb\xcb=+5\x08\x99<\xb7e\xce}\x95h\xd1U\xda\xbc\x10\xb9\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xcd\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb9\x00p\x00\n\x00P\xc5\xc2\x18b\x1c\x16\xd3\xb7\x1ct\xe5\r\xe9"\x02\x1d\xf6U\x1f\xc4@\xbf#\x04.\x7f\x99+\xe5Tx\xe8\xf9\x85\xe3QSc*\xc2\xba\xaa\xf38-dJz\x83\xa8\xdd\xfd-\xbbB\x80\xd38\x08\x13\xbb\x84I\'KoL\x0eS\x1c"\x1927\x14\xee\xbf|\xb1\xe6\xd1\xeag]\x18\xc0i;\xf1=b\x98k\x9a1&\xe1C\'\x9f\x014\x02V\nw.\xc6*\x18\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xce\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa9\x00`\x00\n\x00P\xc1\x9d\xa3z6\x80e\x1d\x11\xa0\x1e\n\x191\x14\x9e\xd9\x17\x0e\xe2\xf0\xb3XLa\xbc\xc3\xfe\xb2\xe9\t\xda\xb9Z\x0b\xd3\xb8\x0fS\xdb\xbb\xd6\xf3 8\xefY\x9f6\x80!\xda\xaa7\x91p}_H7A\xef\x85\xd4\x11S\xae\xa3\xf2l}>s\xd5\xccv/a\xe2\xcc\x97\xd9\x04\xb5\xa5\x96G\xfd\x0e\xff\x12\xf6c\xbaU\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xcf\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x91\x00H\x00\n\x00P\xda\xfb\x1e\xf0]\xf0\xa6\x02v>B/\xc9\xb99\xbc$\xf6\x0c\x94\xbfH\x01^\xebj 7B\xda\x85\x1a-Vs\x16\x10$H\xbd\xca\x1cNZe\xfa\xf6\x01\x98\x93\xb2pc9<"\xc7bD_(!\xc8\x85>\xdc\x8a\x0c \xff\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd0\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9d\x00T\x00\n\x00P\xd9\xd8G\xbe\x86J\xd034\xadsh\xda\xf9\x0b\x9c`\x0fh\xd5p\xf4x\x88\xa8\xf2f\xc3*{\xadS[\xdb\x14\x955sH\xddG9\xe8H\xd9&\x8b\x05\x91\xcd\xb6>\xcb\xeb\x82O%\xc9\xb1\xdaX\x04t\x0f\xbej\x8c\xfc\xcc\xfd\x1dx[\x0e\x1c\x95\xb5P\xe0\x12\xa5\xf7\xe2\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd1\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x8d\x00D\x00\n\x00P\xd9\xd89br\x85\t%\x83aW\xae\xad@D0f\xdc\t\xa2\xbexl\x1c\xc4&\x99I\x16\xa1\xd2\xf1\xd3\x98\xa1\xdbG\x06\xf5\xac\xa2\xa0Z)kZ\xf5\xda3\xdc\xf9\x9e\xafW\xd2\xce\xb8s\x961\xdcN\x96\xbb"\x8e\xd0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd2\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x93\x00J\x00\n\x00P\xd7\x0b\xd1\xbd\xc9$\xcbl\x94\x9b\xc7\x84\x03\xf6,\xc2\xa2\xad\xbe<\x05\xe6\x0c\x8a\xe2\xdd\xf2\xda;4\x99\xab\xef\xa4\xd5\x83\xd9\xed\x8cP\x83N?\xc1A\x94\xaf\x95$\xdc\xf8\xe3PE@\x89\xb6\xdb\xa5\xccbd\xe5V\x93%\xbe\xdc\x03\x17\x8e\x03p\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd3\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa6\x00]\x00\n\x00P\xd2T\xc2:d6\x8a\xe2b\x01\x9d\xe3\xdc\xaf\xb0.\x91\t\x1a\xd0\x89'\xf3y\xf2R\x12Ur\xb7\xafn\xd4\x83\xed\xd1h\x18\xa5\xf6\xb3\x18\x87c%\xa4\x13\x02y\\p=sF@\xf0\x192p\x17\xb0\xc7\x08\xec\xa0\xb4\xf3\xb7V\xc4Q0\xc1\xcc\xf7\xa7\xba\xd44\xfd\xe8xj_\x9f\xd8\xe1+L\xd1+\x05\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd4\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa4\x00[\x00\n\x00P\xce?\x94n1iY\xd2ve\x99\xae\x8f\x1c\xc7X\xdb\xb2\x8b\xf5y\x95T\x95\x81\xf4\x8ck\xa7\xebK^\xa0\x0f\x8a\x8b\xd5\xbc\xf3\x8f\xa4\xdf:~\xcc}\x05\x0c\x9e\xbfX\xbc\x10-\xd6\x17RT4]\x05\xe0\xda\x17\xa2O3\x9c7\x1e\x06C?3-\x172\xaf}#\xc5`Q\xf7\xa8\xb4+\xc2\xe2\xe2\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd5\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa0\x00W\x00\n\x00P\xc4\x8e\x86\xda\xc6$\xdfC\xc0f\x04\x7f\x8dO\xb1\x9e\x04\xa1\xb5j\xa4S\x96\x9e{j8^)\x17\x813fQ\xb9r\xa9\x1c\xfcs\xe7\xd6\x9b\xf0\xa6\xe3\xbb\xf9\xab\xa2\t\x18\xceY\xd3Z\xc5\xc9\x0cW\x11w\xdf\xea\xf4&\xb2{$oJp\x9cy\xa1\x06\xaf\x80\xa1\xec\xa6\x12\xf1\xf3\xb2\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd6\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb2\x00i\x00\n\x00P\xc4t\x0f\xf9n\xcdFT\x11\x804bl\xc8W\xc7\xe4\xe7\xf5H\xc5w\xc9\xf6\xd2\x07;&\x8f\xeb\x1bZi*\xf8\xd9\xfa;\xdbU\xc9?\xbb\r\xaf@\x8d\x8a\xacx\x18@\xb0\xb8a\xa5/P/{\x85\xd8\xaf\xad\xf5\xa6[Q\xe4\xc0>\x80\xf2ybuk\xe3,\xa0\xf2\x05\x9e\xd4\xed\n{\xf3\xcc\x9b\xc0_q\xcf\xd3\xff\x19\xf6AX!\x87\xfb\xe0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd7\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xaa\x00a\x00\n\x00P\xc55n\xfaS}\x13\xd4s\r\xa3\xb8\x7f\x81&8\xbcF\xca\x88}\xfa\x9a\xea\xcaU%\x92\xa8\xd4il\xbe:Y\xb4g\xad{C\xf8t2\xb73\xf8\x1f\xdf\xf88\xed\xcc\xa2\x82\x97\x99\xbb=\xb4U\xfd\xff\xcd"\xba\x80\x94\x0e\xf1\xe4\xc3\xd5\xcd\xa7\x83\xca\xb4$\xee\x82\x15r\x97\x9f\xf8\x96\x81\xe1\xd1\xeb\xf9\xd7]I\xbfb\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd8\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9e\x00U\x00\n\x00P\xc4\x8e\xf8G\xd8{\xf0\x91s\xe6\xec\xd8\x04\x8c\xdc\xe17\xc8!gp\x9ec\xb8:\xf3\xe0\x8e\x90\xc9\xaf\xa0O\x91Y\xfaJ\xa4\xcd\xee1?e\xfd\xf3w\xa2\xe84\x1e\xde8 \x11\xdb\xab`\xff\xd8\xd28\xc1\x96\xcd\xed\x14A\x99\x01\x88\x00W\x97G\xefZ\xe4cOG3\xb2?x\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xd9\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9f\x00V\x00\n\x00P\xc3\xc8\xdbT\xe9\xe4W\xff\xd2v\x9d\x8a\x0c6\xe3T\xd4M\t?[+\xf7\xf3\xc6g\x86y\xe37ykn\xdc\xae\x0crD\xc8\xbf\xdaDD\xbb\xef(\xf8\x16\x83\xcf\x8a:%\xd9\x87\x1d\x9eO\x1eB\xec\xa36\xa0X\xdc_\x84\xe1\xb8uY\xf3$\x038\x92\x9a\xd5\xe7\xf1\x1cv\xf5\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xda\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9c\x00S\x00\n\x00P\xc3i\xe2\xdb\xc4\x02\xc8\xc8\x85!\xf3\x10\n\x89t\x03 \x17\x84\xee-T\x0eA*6\xb5\xc2q\x91qh\xa1\xf1\x1c\x0ba\x94\x8c@\x08)\xa9\x06\xa4:\x9a\xf0[\xed\xfc8\x94\xa4\x81\xdf\x93|\x85\x19\xb4\xbb\x9f\xb00\xd7J\xdb\x19ojW\xec\xb7\x99H\x8c^\xbf\x04\xba\x8f\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xdb\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xae\x00e\x00\n\x00P\xc0$\x8f\xd8\xf79\x1as\x82@\xad\xd7\xeeQ\x95w\xaf\x06+\xbe\xd6\xb7J\xbd\xb8\x81\xe6\xd1\xb4MWc<x\xb1q\xda$\x85\xab\x13\xaa"\xa9\xd2\xbdp\xc0\xec\xad\xc2\x8a\xc9_\xa4\xac\xad\xe8\xafPi\xf4\'2\xfe\x86\xc5\xea\x85\x08\xf6\xee\x90=$\xc04\x91,\x1aH\xea\x8bUD\x19]\xf3L\xcb>U-\xbf\x88\x9d\xef\xad\xa5\xf8\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xdc\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb0\x00g\x00\n\x00P\xd04\xad\x812I~\x86\xb4\xbc-\x8c\xa7\x80\xcb\x9d\x03\xde\xbe\x0b\x9e\x04\xd6\xbc\xd3\xdd\xfa\xa4\xcd\xca\x7f\xdc\xe0w\r\xda\x13=&\x8a\x92:\xf0.\n0\x187\x8b\xfa\x80\x8fd4,\xdcH;\xd0\xa8m\xa8B\x0c\x91$\xa7E\x93\x87\xcc\xba\x0f\xa5\x8e)1\xdf\xb0\xfc\xea\xd2\xcd\xec\xb6z1L\xb3\xd5\xa1\xb1V\xc7w\xe1\xbc\xb7{\xdf\xad\xae\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xdd\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa4\x00[\x00\n\x00P\xd4\xbe`\x0b)\xa9\xb5\x1b\x19\x8b\x05\x8e\x00^et\xfb2\xea\xf0w\xd2\x0cz\xf6\x0c\x97\xb0\x9b\xa9\xeb\xb9EV\xba-p/\x9c\x1a\x8f\xb9/\xe0&\xac$)=Py\xd7\x1b\x8c\xf5\xff\x99\x10Q\xcd\xa8{\x8f)\xcc\xc3\x00\x7f\xc0\xe6\x9bp>4\x82\x8d\xc7\xf1\xb6\x0e\xd3\x90\xca\xbf\x8aR\xc7.\xfb\x1c\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xde\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x99\x00P\x00\n\x00P\xd3/\x12\x1d\xe9\xc5P\xd9\xf0\xffg|\xc2\xea\x9e\x8eF\xa4\xb73\x9e\x0c9wD\x11\x85\xd89\xd0\xc7v\x9a\xa7\xf2\xf0.\xe8\xb0\x8f\xdc%\x96\xc8`\x89=DP\x00\xa1\xccM\x1b\xb7d\x93\x9a\xc3\xea@ny\xceoSU\x85/%\x0blp\xb9\xef\xe6\xabs\x08\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xdf\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa6\x00]\x00\n\x00P\xcf\xde\xc1\x81sV\x13\xa9Xp P\x82h\xbe\xa7\xee\x10\xfa\xec\xa3$=0?\xe0,\x9e|\x10\xe0\xadw\xa5\xf2\xce\xb4\x8d\x82@\x01\x89U\x1e\xe9|\xd0\xb3\xf3'\x9dIk\x91l\xac\xdf}Y\xfc5\xedQK\x97|L\xf0\x93\x05\x10\xcd\xd3\x19B\xdc\xdb\x1dD\xe3\x9d\x83\xa3\n^\xa64\xf6\xce\x8c\x07\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe0\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xa1\x00X\x00\n\x00P\xcd\x13\xbd\x15\x91\x83"F\x85\xd6\x84K)N\xc5\xb7\x14\xc9y<|\xf3\xec\x89\xdc\xea\x99\x0c\xae\x87v\xaa]69\xdf\xedj\x84\x06\xb4Z\xfdL\xe0\x93Z<d\xe8\xd7-r\x9aB\x9a\x8c,lm\x1a\x0b\xd8\xadk\x14[o\xe2\x079#\x84^m\x05\xf8\xbbZ\xdaF\x89\xaf\xf2M\xa6\xd3\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe1\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9b\x00R\x00\n\x00P\xc3\xa4`(r\x9d<\xb4\xbe\xe1 \xf9\xe5N> *\xe9\xbf\xce%2\x91\xb6\x00[\xfc\xb6o\x8b\xfeD\xf03\x16\x86\xfd\xa2\xcfh\x88\xd2\xafk\x17\x10\xb4DM\xf7\x1c\x190\x8fh\x91\x9c\xcd\x81\xe8fj]\x80J\x83\xf2\x00\x17\x97\x96|k\xa4\xcc\xbb\xf4\x05\xc1\xb5\xb8\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe2\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa1\x00X\x00\n\x00P\xc3\x97\xe8\xbcg\xf4v\xca\xe3\xbf\xd7\xb4\xdd\xb3\xc2\xe3\xdbD\xa6\xd5\xd8J\xb5\xcf:\x01\xb1\x05UBL\xcf\x9al\x93q\xd6\xb1SQ\x87\x8b\xdf\xf2\xac\xac\xac\xcd\x05\xdc\xd4\x9ap^\xfd\x13\xdc\xa2\x97\x08\x98c\xd6\x02\xb7\xea\xfb\xd0\xd7\xdc\x84\xfd\x7f6\xe59\x05\xbf(\xf2K\x12\xce\xa9\x10\xcf\xd8\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe3\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa5\x00\\\x00\n\x00P\xcd49\x8cH\xb9\x0c5\xb74J\x13%&\\\x94\xbdI\x1f\x9f.\xf9\xf6D!\xad\r\xa6k\x95\xb3?\xddi\xeb\xf4\x17z\x80\xf4\xe4\xfd\xf8\xd6\xe9\xc8\xc6\x1c\xb7\xfa"0\xc2\xe5\x04\'(\xa8\x06\x90t\r\xc8D\r\x8a\x80s`u9\x07\x9d\x1a\xbcN\xdd\xae\xf4\x97=:\x9d\x93M\xee\xa3\x13\x0fj@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe4\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa9\x00`\x00\n\x00P\xd1\\\x88\x1e*I\xf0\xb8(\xda\x19\xdd\x80\xed\xf6\xaa~\xa99& N\xe6#R\xd4/\x0c85\xdd\x883\x96{\xf5\x1e\xf4\x92\x1e\xbd\x0f&\xce\x02%a\xf0\x1d>\xa4m\x91\xc6$\x84.bAV\xbaF\xca\xfa\x0b\xc5UHn\x1e\\\xf0d\xadC\xa5\xfb\xc4zPXz\xa2\x065WAK\xc1\xd3\x872\x8a\xc6@\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe5\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xb3\x00j\x00\n\x00P\xd3>\xc1\x9d\xe9f\x08F\xa9\x9aZ\x1f\n|O\x1f\x8a\xb4Y\xc6\xc7\xbc#\xd4B\x14]\xf8\xe7\xd6\xf6\xc3\x94\xcad\xa473C\x13w\x10\xefN\xc1\xcc4\xe2C\xcd&*\xe1\x05\xcf\x14\x95\xdb\xcd\xe2\xa4J\xcf\x08\xbb\x0bV\x9f\x93\x8e/\xdd(\xa4":\x1c\xd8a\xe9\x17\xd2\x9b\xc8\x00E\xa3\xc7\x17V6^\x8d~L\xd4\xa3h\x9d\xd9\xc5\x95*\xb6 \x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe6\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa9\x00`\x00\n\x00P\xc7n\xc8O\x01/\x9c\xa0#\xcfw,~9\x05v\xc8\x0e\xec\x96T\xc7\xee\x12A\xdf\xac\x1c\xdb\xb1zc\xe7\x85=\xea\xeb1\xd3\x0cn\xdc\x99Y\x7f\x8f\x9f#\xb3\xe8\xc3\x9e\x08\xdeR\xcc\xdcJ\x97]\xc7\xc5\x9fw\xb1N\x1d~\xe8\x8f\xe6\x93\xf8\xf1a\x99-\xa6\xe3=\x81\x1ab=rZ#p\xddA\x87\xdaP%?\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe7\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa0\x00W\x00\n\x00P\xc9\x024\x91X\xeb6b\x95\x8f\xac\x93\x00\x02\x90\xbf+5\xc3\xea\x8b\x8b\xcc)\x90\x8e\x11\xb0\xd3\xe9\xdb\x8c\xf7\xe6\r\x94-\xb6A!\xac\x92\xee\xb80P\xb0\xa8]\xfb\x133K{Y\xc4\xbe\x12\xec{\xab\x83\xab\xed0\x9d\xc5\x1d\xbb\n\xa1\xa2l\xcbzS4N\x01\xe9\x0f\x98\xec\xd4\xe5\x9a\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe8\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa0\x00W\x00\n\x00P\xc9\xa7v\t_|(\x942=\xa0=\xb6a\x13m^\xa7\xb0\x14\xc6\x977\x82,\xd2\xee\xdc\xf9\xc0-\xb6e\x19\x1d\x83\x96\xb0#\xf3\x917Jt\xb5\x9d\x1bf\xb1\xcd\xffr^\xb5\xe6\xab\xaa\xd3\xa6\xf1\xc3l(Z\xe9\xb7\t\xa4\xfcL$\x7f\x11>\xa6\x15\x82\xe4\xf3\xb7\x0c\xbcb\xf1\xbf0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xe9\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\xa6\x00]\x00\n\x00P\xc7\xa16\xab\xc3\t8\x9eP\xcc0\xe6V2h\x987JZ\xec:\x99\xdd\xa5?\xbb\xe5\x19}\x9e:W\xab}]wOE\x8c~J\xb7S\x8aw\x91x\x1aR'R^)\xa62\xa8\xddJ\xc0h\xaez\x1a\xd6\x7f\xffo\xb1!\xed\xc4\xf7\x1d\xc83\xac\xb4\x8fA\x16\x8b\x94\x7f\xdd\x08\xd8ol\xe7\x9d\xf4\x80\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xea\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xad\x00d\x00\n\x00P\xd3\xec=V\x102\x005j\xc2pEN8\x9c\x91\xa5fb\xd4\x88Z\xeaC\xd9\x17>\xc8H\x98\xac\xb32f\xc7\x13\xcb\xe1\xf4\xec\x8cR\xb3R\xb6_\xabO\xd8:\xb8o)W\x14\x07r\xea\xca\x19\x8b\xab\nI\x05\xcc&\xa8 \xd9\xe8J/`\xc2\xad\x7f\x8bG!\x7f-\xa1\xc1\x1a \x0c\x82\xed\xdc\xc5\x0b\x1c4\xbb1\x00\xf1\xdc\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xeb\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xae\x00e\x00\n\x00P\xd1\xc9\xb0\xc5\x16\xc5\rs\xdd\x84\xf3I!\x06\xed\x81\xae\x89\x10D2\xf1k\xd7\xeeZd\xeb\xc0+|Q\xfe\xf5\xca]\xa2\xd3s\xef"\xb3\x8e\xaf\xbb\xd2\n\xa1\xd72M\x02C]\xc10p"@\xa8\xc1n\xb6\xeb\x19<\x92\xc7\xff,\\P\x8c\xb5\xe6\xa6j\xd7\xae\x94\xfa\xad\xac\x934F^J\xb0\xed\xec\x12\xaf\xcf5\xa1n\x8e@=\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xec\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b"\x98\x00O\x00\n\x00P\xc54'\xac\xa3\xdf3\x12\xde\x83\xaf\x8a+]\x1e\xc8\xcb\x0e\xd5c\x16H\n\xdc~\x1e\xd4\xb9\xae\xa6F\xe1\x01Y\xa8-\xc7j\xdb\xd97\xf9e\xde\x91\xb9\x98\xb5\x0f\x1e\x98+\xde\x01]X\xe2_\x9c\x13x_xC\xf8K|*\x89\xa8\xf5$J\xb3\x16\xe0\x82\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xed\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A",
b'\xab\x00b\x00\n\x00P\xc4\xc9I&\x7fZ^\x13v\x12\x15\xc2\xe5>\xcb\xb5.\x17q\xed\x15Y\xb9\xc0P\x97BN\xa1\xef\xc2\xd3\xf4\xb7\x9d}\xd9jx\xc2\x87\xcf\xe17\x8c\xe9\xd0U\xa25Mf\xc5M\x98\xfd\x92?5W-P\x94#\x85F\x89\x18\x06\x02\x9ba\x11\x06\xd0\xe6#`\x12\x14\xbc\xff\xd1\x874%\xe6\xeb\xef\x00\x9d,\x96\xa4\x19T\xc0\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xee\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\xa6\x00]\x00\n\x00P\xc8A\xe8`\xba\x9f\xbbC8\x01\xedn\xe0\xf1\x00Y#\xfc\x03EW\xbe\x00T\x02\xdf\xca\xe7 \x17\x04\xef\xc1\xeb\xcc\xd4-ng\xbc\x11\xd8\xdd\n\xf2\x8f\xef\xe0\xc7dh\xb0\x8c\xcbq\x0f\xfd,\xd9\xa3\x11\x86\x94\xe0^=\x06\xcd!\x10P\xf3/pZz%:\xae\xd3\xa5\\\xccd%\xa4\x1f4\xd6\xb2\xe3\x94\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xef\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
b'\x9d\x00T\x00\n\x00P\xc9\x83\x85\xd12\xdb\xb4oe\xef\x1b`\xbco\xd9\x8c\xdb\xd6\xa0\xc9\xea}\x92\xcd\xf6/\xdc\x9a69W\xc4\xee\xd0\x80\xef\xbc\t\tFo\xee*H\xf9D\x12\x0fe\xd7\xdb\x83X\xfd\xc1\xfb\xcb\xc58qbQU*#\xb8\x97\x13\x8d\xc0\x9c\xb8!\xb0\xad#\xb1`\x8bj\x0c\x19_\x00\x00\x00@\xee-\xb2A\x00\x00\x02\n\x00\x01\xf0\x00\x00\x00\x00\x00\x00\x00\x00titJbdwfPEidr2nlJ47e4AtitJbdwfPEidr2nlJ47e4A',
]
}
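# The dict above holds raw captured UDP payloads keyed by capture name
# ('thanksgiving' and 'burst'). Each payload ends with a constant trailing
# block containing an incrementing counter byte and the ASCII marker
# 'titJbdwfPEidr2nlJ47e4A' repeated twice; the leading bytes look like
# per-packet length fields, though none of this framing is documented here.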
def create_socket():
    # Plain UDP socket; each captured payload is replayed as one datagram.
    return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)


def send_data(udp_sock, data):
    # Frame every payload with a b'abcd' prefix and send it to the local
    # collector listening on UDP port 6667.
    udp_sock.sendto(b'abcd' + data, ('127.0.0.1', 6667))


def send_thanksgiving():
    s = create_socket()
    print("START transmission at", arrow.now())
    for packet in data['thanksgiving']:
        send_data(s, packet)
    print("END transmission at", arrow.now())


def send_another_burst():
    s = create_socket()
    print("START transmission at", arrow.now())
    for packet in data['burst']:
        send_data(s, packet)
    print("END transmission at", arrow.now())


# Replay schedule: burst, short pause, the 'thanksgiving' capture, a long
# pause, then the same pattern once more.
send_another_burst()
time.sleep(5)
send_thanksgiving()
time.sleep(90)
send_another_burst()
time.sleep(5)
send_thanksgiving()
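# A minimal receiver sketch for exercising the replay above. The address,
# port, and b'abcd' prefix are taken from send_data(); the buffer size,
# function name, and loop shape are assumptions, and the 'socket' module is
# expected to be imported already at the top of this script.
def receive_replayed_packets(host='127.0.0.1', port=6667, bufsize=65535):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((host, port))
    while True:
        payload, addr = sock.recvfrom(bufsize)
        if payload.startswith(b'abcd'):
            payload = payload[4:]  # strip the framing prefix added by send_data()
        print(len(payload), "bytes from", addr)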
---
hexsha: 5df4da8e4aa7a570cc1b6a8bacfefa6622a02095 | size: 8902 | ext: py | lang: Python
path: stonesoup/metricgenerator/tests/test_tracktotruthmetrics.py
repo: io8ex/Stone-Soup @ 071abc8f6004296ab35094db04c7ec410103c419 | licenses: ["MIT"] | stars: 1
---
# -*- coding: utf-8 -*-
import numpy as np
from ..tracktotruthmetrics import SIAPMetrics, IDSIAPMetrics
from ...measures import Euclidean
from ...types.groundtruth import GroundTruthPath
from ...types.metric import SingleTimeMetric, TimeRangeMetric
from ...types.track import Track
def test_siap(trial_manager, trial_truths, trial_tracks, trial_associations):
    position_measure = Euclidean((0, 2))
    velocity_measure = Euclidean((1, 3))
    siap_generator = SIAPMetrics(position_measure=position_measure,
                                 velocity_measure=velocity_measure)
    trial_manager.generators = [siap_generator]
    timestamps = trial_manager.list_timestamps()
    # Test num_tracks_at_time
    for timestamp in timestamps:
        assert siap_generator.num_tracks_at_time(trial_manager, timestamp) == 3
    # Test num_associated_tracks_at_time
    assert siap_generator.num_associated_tracks_at_time(trial_manager, timestamps[0]) == 2
    assert siap_generator.num_associated_tracks_at_time(trial_manager, timestamps[1]) == 3
    assert siap_generator.num_associated_tracks_at_time(trial_manager, timestamps[2]) == 3
    assert siap_generator.num_associated_tracks_at_time(trial_manager, timestamps[3]) == 2
    # Test accuracy_at_time
    assoc0_pos_accuracy = np.sqrt(0.1 ** 2 + 0.1 ** 2)
    assoc1_pos_accuracy = np.sqrt(0.5 ** 2 + 0.5 ** 2)
    assoc0_vel_accuracy = np.sqrt(0.2 ** 2 + 0.2 ** 2)
    assoc1_vel_accuracy = np.sqrt(0.6 ** 2 + 0.6 ** 2)
    exp_pos_accuracy = assoc0_pos_accuracy + assoc1_pos_accuracy
    exp_vel_accuracy = assoc0_vel_accuracy + assoc1_vel_accuracy
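    # Numerically, exp_pos_accuracy = sqrt(0.02) + sqrt(0.5) ~= 0.8485 and
    # exp_vel_accuracy = sqrt(0.08) + sqrt(0.72) ~= 1.1314.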
    pos_accuracy = siap_generator.accuracy_at_time(trial_manager, timestamps[0], position_measure)
    assert pos_accuracy == exp_pos_accuracy
    vel_accuracy = siap_generator.accuracy_at_time(trial_manager, timestamps[0], velocity_measure)
    assert vel_accuracy == exp_vel_accuracy
    # Test truth_track_from_association
    for association in trial_associations:
        truth, track = siap_generator.truth_track_from_association(association)
        assert isinstance(truth, GroundTruthPath)
        assert isinstance(track, Track)
    # Test total_time_tracked
    assert siap_generator.total_time_tracked(trial_manager, trial_truths[0]) == 3  # seconds
    assert siap_generator.total_time_tracked(trial_manager, trial_truths[1]) == 2
    assert siap_generator.total_time_tracked(trial_manager, trial_truths[2]) == 1
    assert siap_generator.total_time_tracked(trial_manager, GroundTruthPath()) == 0
    # Test min_num_tracks_needed_to_track
    assert siap_generator.min_num_tracks_needed_to_track(trial_manager, trial_truths[0]) == 2
    assert siap_generator.min_num_tracks_needed_to_track(trial_manager, trial_truths[1]) == 2
    assert siap_generator.min_num_tracks_needed_to_track(trial_manager, trial_truths[2]) == 1
    assert siap_generator.min_num_tracks_needed_to_track(trial_manager, GroundTruthPath()) == 0
    # Test rate_of_track_number_changes
    exp_rate = (2 - 1 + 2 - 1 + 1 - 1) / (3 + 2 + 1)
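    # Two track-number changes (one each on the first two truths) over six
    # seconds of total tracked time, i.e. an expected rate of 1/3.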
assert siap_generator.rate_of_track_number_changes(trial_manager) == exp_rate
# Test truth_lifetime
for truth in trial_truths:
assert siap_generator.truth_lifetime(truth) == 3
# Test longest_track_time_on_truth
assert siap_generator.longest_track_time_on_truth(trial_manager, trial_truths[0]) == 2
assert siap_generator.longest_track_time_on_truth(trial_manager, trial_truths[1]) == 1
assert siap_generator.longest_track_time_on_truth(trial_manager, trial_truths[2]) == 1
# Test compute_metric
metrics = siap_generator.compute_metric(trial_manager)
expected_titles = ["SIAP Completeness", "SIAP Ambiguity", "SIAP Spuriousness",
"SIAP Position Accuracy", "SIAP Velocity Accuracy",
"SIAP Rate of Track Number Change", "SIAP Longest Track Segment",
"SIAP Completeness at times", "SIAP Ambiguity at times",
"SIAP Spuriousness at times", "SIAP Position Accuracy at times",
"SIAP Velocity Accuracy at times"]
for expected_title in expected_titles:
assert len({metric for metric in metrics if metric.title == expected_title}) == 1
assert len({metric for metric in metrics if metric.title not in expected_titles}) == 0
for metric in metrics:
assert isinstance(metric, TimeRangeMetric)
assert metric.time_range.start_timestamp == timestamps[0]
assert metric.time_range.end_timestamp == timestamps[3]
assert metric.generator == siap_generator
if metric.title.endswith(" at times"):
assert isinstance(metric.value, list)
assert len(metric.value) == 4 # number of timestamps
            for single_time_metric in metric.value:
                assert isinstance(single_time_metric, SingleTimeMetric)
                assert isinstance(single_time_metric.value, (float, int))
                assert single_time_metric.generator == siap_generator
else:
assert isinstance(metric.value, (float, int))
def test_id_siap(trial_manager, trial_truths, trial_tracks, trial_associations):
position_measure = Euclidean((0, 2))
velocity_measure = Euclidean((1, 3))
truth_id = track_id = "colour"
siap_generator = IDSIAPMetrics(position_measure=position_measure,
velocity_measure=velocity_measure,
truth_id=truth_id,
track_id=track_id)
trial_manager.generators = [siap_generator]
timestamps = trial_manager.list_timestamps()
# Test find_track_id
assert siap_generator.find_track_id(trial_tracks[0], timestamps[0]) == "red"
assert siap_generator.find_track_id(trial_tracks[0], timestamps[1]) == "blue"
assert siap_generator.find_track_id(trial_tracks[0], timestamps[2]) == "red"
assert siap_generator.find_track_id(trial_tracks[0], timestamps[3]) == "red"
assert siap_generator.find_track_id(trial_tracks[1], timestamps[0]) == "red"
assert siap_generator.find_track_id(trial_tracks[1], timestamps[1]) == "red"
assert siap_generator.find_track_id(trial_tracks[1], timestamps[2]) == "green"
assert siap_generator.find_track_id(trial_tracks[1], timestamps[3]) == "green"
assert siap_generator.find_track_id(trial_tracks[2], timestamps[0]) is None
assert siap_generator.find_track_id(trial_tracks[2], timestamps[1]) is None
assert siap_generator.find_track_id(trial_tracks[2], timestamps[2]) == "blue"
assert siap_generator.find_track_id(trial_tracks[2], timestamps[3]) == "green"
# Test num_id_truths_at_time
u, c, i = siap_generator.num_id_truths_at_time(trial_manager, timestamps[0])
assert u == 0
assert c == 1
assert i == 1
u, c, i = siap_generator.num_id_truths_at_time(trial_manager, timestamps[1])
assert u == 1
assert c == 0
assert i == 1
u, c, i = siap_generator.num_id_truths_at_time(trial_manager, timestamps[2])
assert u == 0
assert c == 2
assert i == 0
u, c, i = siap_generator.num_id_truths_at_time(trial_manager, timestamps[3])
assert u == 0
assert c == 1
assert i == 1
# Test compute_metric
metrics = siap_generator.compute_metric(trial_manager)
expected_titles = ["SIAP Completeness", "SIAP Ambiguity", "SIAP Spuriousness",
"SIAP Position Accuracy", "SIAP Velocity Accuracy",
"SIAP Rate of Track Number Change", "SIAP Longest Track Segment",
"SIAP Completeness at times", "SIAP Ambiguity at times",
"SIAP Spuriousness at times", "SIAP Position Accuracy at times",
"SIAP Velocity Accuracy at times",
"SIAP ID Completeness", "SIAP ID Correctness", "SIAP ID Ambiguity",
"SIAP ID Completeness at times", "SIAP ID Correctness at times",
"SIAP ID Ambiguity at times"]
for expected_title in expected_titles:
assert len({metric for metric in metrics if metric.title == expected_title}) == 1
assert len({metric for metric in metrics if metric.title not in expected_titles}) == 0
for metric in metrics:
assert isinstance(metric, TimeRangeMetric)
assert metric.time_range.start_timestamp == timestamps[0]
assert metric.time_range.end_timestamp == timestamps[3]
assert metric.generator == siap_generator
if metric.title.endswith(" at times"):
assert isinstance(metric.value, list)
assert len(metric.value) == 4 # number of timestamps
            for single_time_metric in metric.value:
                assert isinstance(single_time_metric, SingleTimeMetric)
                assert isinstance(single_time_metric.value, (float, int))
                assert single_time_metric.generator == siap_generator
else:
assert isinstance(metric.value, (float, int))
| 47.100529
| 98
| 0.694226
| 1,149
| 8,902
| 5.092254
| 0.093995
| 0.104427
| 0.097419
| 0.047171
| 0.801231
| 0.752008
| 0.746197
| 0.746197
| 0.720902
| 0.706204
| 0
| 0.018925
| 0.216468
| 8,902
| 188
| 99
| 47.351064
| 0.819928
| 0.047068
| 0
| 0.452555
| 0
| 0
| 0.091553
| 0
| 0
| 0
| 0
| 0
| 0.510949
| 1
| 0.014599
| false
| 0
| 0.043796
| 0
| 0.058394
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d5877db5892996888d521ce7a03879213017f75
| 21,141
|
py
|
Python
|
p2ptracker/tests/test_transfers.py
|
TMG-nl/p2ptracker
|
0e6e77eac77de3fa4f15443920bc6f6886b129b4
|
[
"MIT"
] | 5
|
2015-04-29T04:55:21.000Z
|
2017-10-27T08:51:56.000Z
|
p2ptracker/tests/test_transfers.py
|
TMG-nl/p2ptracker
|
0e6e77eac77de3fa4f15443920bc6f6886b129b4
|
[
"MIT"
] | null | null | null |
p2ptracker/tests/test_transfers.py
|
TMG-nl/p2ptracker
|
0e6e77eac77de3fa4f15443920bc6f6886b129b4
|
[
"MIT"
] | null | null | null |
__author__ = 'ramon'
from flaskext.testing import TestCase
import redis
from p2ptracker import create_app
from p2ptracker.tests.helpers import utils
import os
import logging
from mocker import Mocker
log = logging.getLogger('hyves.p2ptracker.test.test_transfers')
REMOVE_LOG = False
SCRIPTDIR = os.path.dirname(__file__)
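# NOTE: this module targets Python 2 (old-style "except Exception, e" clauses
# and print statements) and assumes a local Redis instance reachable via the
# app's REDISHOST/REDISPORT settings; setUp/tearDown flush that database.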
class TestTransfers(TestCase):
def create_app(self):
return create_app()
def setUp(self):
self.mocker = Mocker()
r = redis.Redis(host=self.app.config['REDISHOST'], port=self.app.config['REDISPORT'])
r.ping()
r.flushdb()
def tearDown(self):
if os.path.exists('p2ptracker.log') and REMOVE_LOG:
os.remove('p2ptracker.log')
r = redis.Redis(host=self.app.config['REDISHOST'], port=self.app.config['REDISPORT'])
r.ping()
r.flushdb()
# Actual Test methods
def test_transfers_with_no_data_and_params(self):
'''Should return an empty dictionary'''
resp = self.client.get('/transfers/')
self.assert200(resp)
self.assertEquals(resp.json, dict())
def test_transfers_with_active_transfer(self):
'''If a transfer is active, this will return a dict of hashes and their torrentfiles'''
filename = '%s/test.torrent' % SCRIPTDIR
        file = None
        try:
            file = open(filename, 'r')
            ihash = utils.get_infohash_from_file(file)
            file.seek(0)
            utils.post_torrentfile(self.client, filename, file)
        except Exception, e:
            log.critical("Cannot open test torrent file")
            self.assertTrue(False, "%s" % e)
        finally:
            if file is not None:
                file.close()
resp = self.client.get('/transfers/')
self.assert200(resp)
self.assertTrue(isinstance(resp.json, type(dict())))
self.assertTrue(ihash in resp.json)
def test_empty_stats(self):
        '''With no clients, the global stats should report zero peers and seeders'''
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
transfersize = utils.get_size_from_filename(filename)
utils.post_torrent(self.client, filename)
resp = self.client.get('/transfers/%s.json' % ihash)
log.debug(resp)
self.assert200(resp)
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 0)
self.assertEquals(resp.json['global']['seeders'], 0)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '0.00%')
def test_stats_with_a_seeder(self):
filename = '%s/test.torrent' % SCRIPTDIR
try:
file = open(filename, 'r')
ihash = utils.get_infohash_from_file(file)
transfersize = utils.get_size_from_torrentfile(file)
file.seek(0)
utils.post_torrentfile(self.client, filename, file)
finally:
file.close()
try:
ipaddress = '192.168.0.12'
rackname = 'FA12'
utils.add_client(self.app, ihash, ipaddress, rackname, left=0)
except Exception, e:
raise e
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 1)
self.assertEquals(resp.json['global']['seeders'], 1)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '100.00%')
def test_stats_with_peer(self):
'''Test a single peer'''
filename = '%s/test.torrent' % SCRIPTDIR
try:
file = open(filename, 'r')
ihash = utils.get_infohash_from_file(file)
transfersize = utils.get_size_from_torrentfile(file)
file.seek(0)
            utils.post_torrentfile(self.client, filename, file)
finally:
file.close()
try:
ipaddress = '192.168.0.12'
rackname = 'FA12'
utils.add_client(self.app, ihash, ipaddress, rackname, left=transfersize)
except Exception, e:
raise e
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 1)
self.assertEquals(resp.json['global']['seeders'], 0)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '0.00%')
def test_stats_with_seeder_and_peer(self):
filename = '%s/test.torrent' % SCRIPTDIR
try:
file = open(filename, 'r')
ihash = utils.get_infohash_from_file(file)
transfersize = utils.get_size_from_torrentfile(file)
file.seek(0)
utils.post_torrentfile(self.client, filename, file)
finally:
file.close()
try:
ipaddress = '192.168.0.12'
rackname = 'FA12'
utils.add_client(self.app, ihash, ipaddress, rackname, left=transfersize)
except Exception, e:
raise e
try:
ipaddress = '192.168.0.13'
rackname = 'FA13'
utils.add_client(self.app, ihash, ipaddress, rackname, left=0)
except Exception, e:
raise e
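        # one seeder (left=0) plus one leecher (left=transfersize) should
        # average out to 50% global progress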
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 2)
self.assertEquals(resp.json['global']['seeders'], 1)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '50.00%')
def test_stats_progress(self):
'''Test a single peer progress'''
filename = '%s/test.torrent' % SCRIPTDIR
try:
file = open(filename, 'r')
ihash = utils.get_infohash_from_file(file)
transfersize = utils.get_size_from_torrentfile(file)
file.seek(0)
utils.post_torrentfile(self.client, filename, file)
finally:
file.close()
try:
ipaddress = '192.168.0.12'
rackname = 'FA12'
utils.add_client(self.app, ihash, ipaddress, rackname, left=transfersize, mock_smdb=True)
except Exception, e:
raise e
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 1)
self.assertEquals(resp.json['global']['seeders'], 0)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '0.00%')
try:
ipaddress = '192.168.0.12'
rackname = 'FA12'
utils.add_client(self.app, ihash, ipaddress, rackname, left=(transfersize/4*3), mock_smdb=False)
except Exception, e:
raise e
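        # the same client re-announces with 3/4 of the transfer left, so
        # global progress should move from 0% to 25%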
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 1)
self.assertEquals(resp.json['global']['seeders'], 0)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '25.00%')
def test_start_event_handling(self):
        '''A started event should set the first_start and last_start timestamps'''
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
transfersize = utils.get_size_from_filename(filename)
utils.post_torrent(self.client, filename)
try:
ipaddress = '192.168.0.12'
rackname = 'FA12'
utils.add_client(self.app, ihash, ipaddress, rackname, left=transfersize, event='started', mock_smdb=True)
except Exception, e:
raise e
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 1)
self.assertEquals(resp.json['global']['seeders'], 0)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '0.00%')
self.assertTrue(resp.json['global']['first_start'] is not None)
self.assertTrue(resp.json['global']['last_start'] is not None)
self.assertTrue(resp.json['global']['first_complete'] is None)
self.assertTrue(resp.json['global']['last_complete'] is None)
self.assertEqual(resp.json['global']['first_start'], resp.json['global']['last_start'])
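        # a second 'started' announce should update last_start while leaving
        # first_start unchanged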
utils.add_client(self.app, ihash, ipaddress, rackname, left=transfersize, event='started', mock_smdb=True)
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
self.assertNotEqual(resp.json['global']['first_start'], resp.json['global']['last_start'])
def test_stopped_event_handling(self):
        '''A stopped event should leave the start and complete timestamps unset'''
filename = '%s/test.torrent' % SCRIPTDIR
try:
file = open(filename, 'r')
ihash = utils.get_infohash_from_file(file)
transfersize = utils.get_size_from_torrentfile(file)
file.seek(0)
utils.post_torrentfile(self.client, filename, file)
finally:
file.close()
try:
ipaddress = '192.168.0.12'
rackname = 'FA12'
utils.add_client(self.app, ihash, ipaddress, rackname, left=transfersize, event='stopped', mock_smdb=True)
except Exception, e:
raise e
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 1)
self.assertEquals(resp.json['global']['seeders'], 0)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '0.00%')
self.assertTrue(resp.json['global']['first_start'] is None)
self.assertTrue(resp.json['global']['last_start'] is None)
self.assertTrue(resp.json['global']['first_complete'] is None)
self.assertTrue(resp.json['global']['last_complete'] is None)
def test_completed_event_handling(self):
        '''A completed event should set the first_complete and last_complete timestamps'''
filename = '%s/test.torrent' % SCRIPTDIR
try:
file = open(filename, 'r')
ihash = utils.get_infohash_from_file(file)
transfersize = utils.get_size_from_torrentfile(file)
file.seek(0)
utils.post_torrent(self.client, filename)
finally:
file.close()
ipaddress = '192.168.0.12'
rackname = 'FA12'
utils.add_client(self.app, ihash, ipaddress, rackname, left=transfersize, event='completed', mock_smdb=True)
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertTrue('global' in resp.json)
self.assertEquals(resp.json['global']['size'], str(transfersize))
self.assertEquals(resp.json['global']['peers'], 1)
self.assertEquals(resp.json['global']['seeders'], 0)
self.assertEquals(resp.json['global']['active'], True)
self.assertEquals(resp.json['global']['progress'], '0.00%')
self.assertTrue(resp.json['global']['first_start'] is None)
self.assertTrue(resp.json['global']['last_start'] is None)
self.assertTrue(resp.json['global']['first_complete'] is not None)
self.assertTrue(resp.json['global']['last_complete'] is not None)
self.assertEqual(resp.json['global']['first_complete'], resp.json['global']['last_complete'])
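        # a second 'completed' announce should update last_complete while
        # leaving first_complete unchanged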
utils.add_client(self.app, ihash, ipaddress, rackname, left=transfersize, event='completed', mock_smdb=True)
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert200(resp)
print resp.json
self.assertNotEqual(resp.json['global']['first_complete'], resp.json['global']['last_complete'])
def test_get_peers_for_transfer(self):
        '''Tests the additional REST-style interface for retrieving the peers of a transfer'''
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
utils.post_torrent(self.client, filename)
utils.add_client(self.app, ihash, '192.168.0.11', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.12', port=10004, rackname='testrack1', left=0)
resp = self.client.get('/transfers/peers/%s.json' % ihash)
print resp.data
print resp.json
self.assert200(resp)
self.assertTrue('peers' in resp.json)
self.assertEqual(sorted(resp.json['peers']), ['192.168.0.11:10004', '192.168.0.12:10004'])
def test_get_seeders_for_transfer(self):
        '''Tests the additional REST-style interface for retrieving the seeders of a transfer'''
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
utils.post_torrent(self.client, filename)
utils.add_client(self.app, ihash, '192.168.0.11', port=10004, rackname='testrack1', left=0)
utils.add_client(self.app, ihash, '192.168.0.12', port=10004, rackname='testrack1', left=0)
resp = self.client.get('/transfers/seeders/%s.json' % ihash)
print resp.data
print resp.json
self.assert200(resp)
self.assertTrue('seeders' in resp.json)
self.assertEqual(sorted(resp.json['seeders']), ['192.168.0.11:10004', '192.168.0.12:10004'])
def test_get_leechers_for_transfer(self):
'''This method gets the remaining leechers'''
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
utils.post_torrent(self.client, filename)
utils.add_client(self.app, ihash, '192.168.0.11', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.12', port=10004, rackname='testrack1', left=0)
utils.add_client(self.app, ihash, '192.168.0.13', port=10004, rackname='testrack2', left=10000)
resp = self.client.get('/transfers/leechers/%s.json' % ihash)
print resp.data
print resp.json
self.assert200(resp)
self.assertTrue('leechers' in resp.json)
self.assertEqual(sorted(resp.json['leechers']), ['192.168.0.11:10004', '192.168.0.13:10004'])
def test_get_repr_for_transfer(self):
        '''This method gets all the representants'''
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
utils.post_torrent(self.client, filename)
utils.add_client(self.app, ihash, '192.168.0.11', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.12', port=10004, rackname='testrack1', left=0)
utils.add_client(self.app, ihash, '192.168.0.13', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.14', port=10004, rackname='testrack2', left=10000)
utils.add_client(self.app, ihash, '192.168.0.15', port=10004, rackname='testrack2', left=10000)
utils.add_client(self.app, ihash, '192.168.0.16', port=10004, rackname='testrack2', left=10000)
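        # with three clients per rack, the expected response below contains
        # two representants per rack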
resp = self.client.get('/transfers/representants/%s.json' % ihash)
print resp.data
print resp.json
self.assert200(resp)
self.assertTrue('representants' in resp.json)
self.assertEqual(sorted(resp.json['representants']),
['192.168.0.11:10004', '192.168.0.12:10004', '192.168.0.14:10004', '192.168.0.15:10004'])
def test_get_racks_for_transfer(self):
'''This method gets a list of racks involved in the transfer'''
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
utils.post_torrent(self.client, filename)
utils.add_client(self.app, ihash, '192.168.0.11', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.12', port=10004, rackname='testrack1', left=0)
utils.add_client(self.app, ihash, '192.168.0.13', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.14', port=10004, rackname='testrack2', left=10000)
utils.add_client(self.app, ihash, '192.168.0.15', port=10004, rackname='testrack2', left=10000)
utils.add_client(self.app, ihash, '192.168.0.16', port=10004, rackname='testrack2', left=10000)
resp = self.client.get('/transfers/racks/%s.json' % ihash)
print resp.data
print resp.json
self.assert200(resp)
self.assertTrue('racks' in resp.json)
self.assertEquals(sorted(resp.json['racks']), ['testrack1', 'testrack2'])
def test_remove_transfer(self):
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
utils.post_torrent(self.client, filename)
utils.add_client(self.app, ihash, '192.168.0.11', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.12', port=10004, rackname='testrack1', left=0)
utils.add_client(self.app, ihash, '192.168.0.13', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.14', port=10004, rackname='testrack2', left=10000)
utils.add_client(self.app, ihash, '192.168.0.15', port=10004, rackname='testrack2', left=10000)
utils.add_client(self.app, ihash, '192.168.0.16', port=10004, rackname='testrack2', left=10000)
resp = self.client.get('/transfers/racks/%s.json' % ihash)
self.assert200(resp)
self.assertTrue('racks' in resp.json)
self.assertEquals(sorted(resp.json['racks']), ['testrack1', 'testrack2'])
resp = self.client.delete('/transfers/%s.json' % ihash)
self.assert200(resp)
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert404(resp)
resp = self.client.get('/transfers/')
self.assert200(resp)
self.assertEqual(resp.json, dict())
resp = self.client.get('/torrents/')
self.assert200(resp)
self.assertEqual(resp.json, dict())
r = redis.Redis(host=self.app.config['REDISHOST'], port=self.app.config['REDISPORT'])
keylist = list(r.keys('*'))
for key in keylist:
self.assertTrue(ihash not in key.split(':'))
def test_remove_all_transfers(self):
filename = '%s/test.torrent' % SCRIPTDIR
ihash = utils.get_ihash_from_filename(filename)
utils.post_torrent(self.client, filename)
utils.add_client(self.app, ihash, '192.168.0.11', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.12', port=10004, rackname='testrack1', left=0)
utils.add_client(self.app, ihash, '192.168.0.13', port=10004, rackname='testrack1', left=10000)
utils.add_client(self.app, ihash, '192.168.0.14', port=10004, rackname='testrack2', left=10000)
utils.add_client(self.app, ihash, '192.168.0.15', port=10004, rackname='testrack2', left=10000)
utils.add_client(self.app, ihash, '192.168.0.16', port=10004, rackname='testrack2', left=10000)
resp = self.client.get('/transfers/racks/%s.json' % ihash)
self.assert200(resp)
self.assertTrue('racks' in resp.json)
self.assertEquals(sorted(resp.json['racks']), ['testrack1', 'testrack2'])
resp = self.client.delete('/transfers/')
self.assert200(resp)
resp = self.client.get('/transfers/%s.json' % ihash)
self.assert404(resp)
resp = self.client.get('/transfers/')
self.assert200(resp)
self.assertEqual(resp.json, dict())
resp = self.client.get('/torrents/')
self.assert200(resp)
self.assertEqual(resp.json, dict())
r = redis.Redis(host=self.app.config['REDISHOST'], port=self.app.config['REDISPORT'])
self.assertTrue(list(r.keys('*')) == [])
| 48.488532
| 118
| 0.628636
| 2,647
| 21,141
| 4.93351
| 0.068757
| 0.066774
| 0.069684
| 0.084539
| 0.894402
| 0.887817
| 0.881461
| 0.872885
| 0.856574
| 0.841872
| 0
| 0.059682
| 0.221702
| 21,141
| 435
| 119
| 48.6
| 0.733986
| 0.000899
| 0
| 0.794416
| 0
| 0
| 0.151383
| 0.010607
| 0
| 0
| 0
| 0
| 0.309645
| 0
| null | null | 0
| 0.017767
| null | null | 0.048223
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5d6c386ec52721522ec18c767b356c1fb0bc903e
| 5,928
|
py
|
Python
|
test/utils/test_pivoted_cholesky.py
|
Balandat/linear_operator
|
34c1bc6a0bf4010d54243a4503fb24b9c3201b95
|
[
"MIT"
] | 18
|
2020-11-13T14:21:38.000Z
|
2022-03-01T22:14:07.000Z
|
test/utils/test_pivoted_cholesky.py
|
Balandat/linear_operator
|
34c1bc6a0bf4010d54243a4503fb24b9c3201b95
|
[
"MIT"
] | 7
|
2020-11-16T00:53:27.000Z
|
2021-01-15T06:10:14.000Z
|
test/utils/test_pivoted_cholesky.py
|
Balandat/linear_operator
|
34c1bc6a0bf4010d54243a4503fb24b9c3201b95
|
[
"MIT"
] | 2
|
2020-11-13T02:31:11.000Z
|
2021-06-04T12:43:05.000Z
|
#!/usr/bin/env python3
from __future__ import annotations
import math
import os
import random
import unittest
import torch
import linear_operator
from linear_operator import settings
from linear_operator.test.utils import approx_equal
from linear_operator.utils import pivoted_cholesky
def rbf_kernel(x1, x2=None):
if x2 is None:
x2 = x1
if x1.dim() == 1:
x1 = x1.unsqueeze(-1)
if x2.dim() == 1:
x2 = x2.unsqueeze(-1)
dist = (x1.unsqueeze(-2) - x2.unsqueeze(-3)).norm(p=2, dim=-1).pow(2)
return dist.div(-2.0).exp()
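# Unless the UNLOCK_SEED environment variable is set to something other than
# "false", setUp seeds torch, CUDA and random so these tests are deterministic.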
class TestPivotedCholesky(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
random.seed(0)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def test_pivoted_cholesky(self):
size = 100
train_x = torch.linspace(0, 1, size)
covar_matrix = rbf_kernel(train_x, train_x)
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol @ piv_chol.transpose(-1, -2)
self.assertTrue(approx_equal(covar_approx, covar_matrix, 2e-4))
def test_solve_qr(self, dtype=torch.float64, tol=1e-8):
size = 50
X = torch.rand((size, 2)).to(dtype=dtype)
y = torch.sin(torch.sum(X, 1)).unsqueeze(-1).to(dtype=dtype)
with settings.min_preconditioning_size(0):
noise = torch.DoubleTensor(size).uniform_(math.log(1e-3), math.log(1e-1)).exp_().to(dtype=dtype)
linear_op = linear_operator.to_linear_operator(rbf_kernel(X)).add_diag(noise)
precondition_qr, _, logdet_qr = linear_op._preconditioner()
F = linear_op._piv_chol_self
M = noise.diag() + F.matmul(F.t())
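            # torch.solve and torch.cholesky are the older PyTorch APIs used
            # here; newer PyTorch exposes the equivalents as torch.linalg.solve
            # and torch.linalg.cholesky.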
x_exact = torch.solve(y, M)[0]
x_qr = precondition_qr(y)
self.assertTrue(approx_equal(x_exact, x_qr, tol))
logdet = 2 * torch.cholesky(M).diag().log().sum(-1)
self.assertTrue(approx_equal(logdet, logdet_qr, tol))
def test_solve_qr_constant_noise(self, dtype=torch.float64, tol=1e-8):
size = 50
X = torch.rand((size, 2)).to(dtype=dtype)
y = torch.sin(torch.sum(X, 1)).unsqueeze(-1).to(dtype=dtype)
with settings.min_preconditioning_size(0):
noise = 1e-2 * torch.ones(size, dtype=dtype)
linear_op = linear_operator.to_linear_operator(rbf_kernel(X)).add_diag(noise)
precondition_qr, _, logdet_qr = linear_op._preconditioner()
F = linear_op._piv_chol_self
M = noise.diag() + F.matmul(F.t())
x_exact = torch.solve(y, M)[0]
x_qr = precondition_qr(y)
self.assertTrue(approx_equal(x_exact, x_qr, tol))
logdet = 2 * torch.cholesky(M).diag().log().sum(-1)
self.assertTrue(approx_equal(logdet, logdet_qr, tol))
def test_solve_qr_float32(self):
self.test_solve_qr(dtype=torch.float32, tol=1e-2)
def test_solve_qr_constant_noise_float32(self):
self.test_solve_qr_constant_noise(dtype=torch.float32, tol=1e-3)
class TestPivotedCholeskyBatch(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
random.seed(0)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def test_pivoted_cholesky(self):
size = 100
train_x = torch.cat(
[torch.linspace(0, 1, size).unsqueeze(0), torch.linspace(0, 0.5, size).unsqueeze(0)], 0
).unsqueeze(-1)
covar_matrix = rbf_kernel(train_x, train_x)
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol @ piv_chol.transpose(-1, -2)
self.assertTrue(approx_equal(covar_approx, covar_matrix, 2e-4))
class TestPivotedCholeskyMultiBatch(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
random.seed(0)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def test_pivoted_cholesky(self):
size = 100
train_x = torch.cat(
[
torch.linspace(0, 1, size).unsqueeze(0),
torch.linspace(0, 0.5, size).unsqueeze(0),
torch.linspace(0, 0.25, size).unsqueeze(0),
torch.linspace(0, 1.25, size).unsqueeze(0),
torch.linspace(0, 1.5, size).unsqueeze(0),
torch.linspace(0, 1, size).unsqueeze(0),
torch.linspace(0, 0.5, size).unsqueeze(0),
torch.linspace(0, 0.25, size).unsqueeze(0),
torch.linspace(0, 1.25, size).unsqueeze(0),
torch.linspace(0, 1.25, size).unsqueeze(0),
torch.linspace(0, 1.5, size).unsqueeze(0),
torch.linspace(0, 1, size).unsqueeze(0),
],
0,
).unsqueeze(-1)
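        # the 12 stacked inputs are viewed as a 2 x 2 x 3 multi-batch below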
covar_matrix = rbf_kernel(train_x, train_x).view(2, 2, 3, size, size)
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol @ piv_chol.transpose(-1, -2)
self.assertTrue(approx_equal(covar_approx, covar_matrix, 2e-4))
if __name__ == "__main__":
unittest.main()
| 35.927273
| 108
| 0.619096
| 824
| 5,928
| 4.234223
| 0.145631
| 0.034394
| 0.060189
| 0.065348
| 0.83233
| 0.811981
| 0.785612
| 0.785612
| 0.785612
| 0.785612
| 0
| 0.038462
| 0.25
| 5,928
| 164
| 109
| 36.146341
| 0.746289
| 0.003543
| 0
| 0.700787
| 0
| 0
| 0.019641
| 0
| 0
| 0
| 0
| 0
| 0.055118
| 1
| 0.110236
| false
| 0
| 0.07874
| 0
| 0.220472
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d6ca86e153282330e979044dd3aa11cead54bd8
| 18,577
|
py
|
Python
|
PROPOSAL/tests/gen_testfiles_scripts/photonuclear.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | 1
|
2020-12-24T22:00:01.000Z
|
2020-12-24T22:00:01.000Z
|
PROPOSAL/tests/gen_testfiles_scripts/photonuclear.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | null | null | null |
PROPOSAL/tests/gen_testfiles_scripts/photonuclear.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | 3
|
2020-07-17T09:20:29.000Z
|
2021-03-30T16:44:18.000Z
|
import pyPROPOSAL as pp
import numpy as np
photo_real = [
pp.parametrization.photonuclear.Zeus,
pp.parametrization.photonuclear.BezrukovBugaev,
pp.parametrization.photonuclear.Rhode,
pp.parametrization.photonuclear.Kokoulin
]
particle_defs = [
pp.particle.MuMinusDef.get(),
pp.particle.TauMinusDef.get()#,
# pp.particle.EMinusDef.get()
]
mediums = [
pp.medium.Ice(1.0),
pp.medium.Hydrogen(1.0),
pp.medium.Uranium(1.0)
]
cuts = [
pp.EnergyCutSettings(-1, -1),
pp.EnergyCutSettings(500, -1),
pp.EnergyCutSettings(-1, 0.05),
pp.EnergyCutSettings(500, 0.05)
]
multiplier = 1.
hard_components = [0, 1]
photo_q2 = [
pp.parametrization.photonuclear.AbramowiczLevinLevyMaor91,
pp.parametrization.photonuclear.AbramowiczLevinLevyMaor97,
pp.parametrization.photonuclear.ButkevichMikhailov,
pp.parametrization.photonuclear.RenoSarcevicSu
]
photo_q2_interpol = [
pp.parametrization.photonuclear.AbramowiczLevinLevyMaor91Interpolant,
pp.parametrization.photonuclear.AbramowiczLevinLevyMaor97Interpolant,
pp.parametrization.photonuclear.ButkevichMikhailovInterpolant,
pp.parametrization.photonuclear.RenoSarcevicSuInterpolant
]
shadows = [
pp.parametrization.photonuclear.ShadowDuttaRenoSarcevicSeckel(),
pp.parametrization.photonuclear.ShadowButkevichMikhailov()
]
energies = np.logspace(4, 13, num=10)
interpoldef = pp.InterpolationDef()
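# Each create_table_* helper below sweeps the cartesian product of particles,
# media, energy cuts and parametrization options, writing one tab-separated
# row per energy into a text file used as test input.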
def create_table_dEdx(dir_name, interpolate=False):
with open(dir_name + "Photo_Real_dEdx{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for hard in hard_components:
for parametrization in photo_real:
photo = parametrization(
particle,
medium,
cut,
multiplier,
hard)
if interpolate:
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dEdx = xsection.calculate_dEdx(energy)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(dEdx))
buf.append(photo.name)
buf.append(str(hard))
buf.append("\n")
file.write("\t".join(buf))
def create_table_dNdx(dir_name, interpolate=False):
with open(dir_name + "Photo_Real_dNdx{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for hard in hard_components:
for parametrization in photo_real:
photo = parametrization(
particle,
medium,
cut,
multiplier,
hard)
if interpolate:
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dNdx = xsection.calculate_dNdx(energy)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(dNdx))
buf.append(photo.name)
buf.append(str(hard))
buf.append("\n")
file.write("\t".join(buf))
def create_table_dNdx_rnd(dir_name, interpolate=False):
pp.RandomGenerator.get().set_seed(1234)
with open(dir_name + "Photo_Real_dNdx_rnd{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for hard in hard_components:
rnd = pp.RandomGenerator.get().random_double()
for parametrization in photo_real:
photo = parametrization(
particle,
medium,
cut,
multiplier,
hard)
if interpolate:
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dNdx = xsection.calculate_dNdx_rnd(energy, rnd)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(rnd))
buf.append(str(dNdx))
buf.append(photo.name)
buf.append(str(hard))
buf.append("\n")
file.write("\t".join(buf))
def create_table_stochastic_loss(dir_name, interpolate=False):
pp.RandomGenerator.get().set_seed(1234)
with open(dir_name + "Photo_Real_e{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for hard in hard_components:
for parametrization in photo_real:
photo = parametrization(
particle,
medium,
cut,
multiplier,
hard)
if interpolate:
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
rnd1 = pp.RandomGenerator.get().random_double()
rnd2 = pp.RandomGenerator.get().random_double()
stochastic_loss = xsection.calculate_stochastic_loss(energy, rnd1, rnd2)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(rnd1))
buf.append(str(rnd2))
buf.append(str(stochastic_loss))
buf.append(photo.name)
buf.append(str(hard))
buf.append("\n")
file.write("\t".join(buf))
def create_table_dEdx_Q2(dir_name, interpolate=False):
if interpolate:
q2 = photo_q2_interpol
else:
q2 = photo_q2
with open(dir_name + "Photo_Q2_dEdx{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for shadow in shadows:
for parametrization in q2:
if interpolate:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow,
interpoldef)
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow)
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dEdx = xsection.calculate_dEdx(energy)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(dEdx))
buf.append(photo.name)
buf.append(shadow.name)
buf.append("\n")
file.write("\t".join(buf))
def create_table_dNdx_Q2(dir_name, interpolate=False):
if interpolate:
q2 = photo_q2_interpol
else:
q2 = photo_q2
with open(dir_name + "Photo_Q2_dNdx{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for shadow in shadows:
for parametrization in q2:
if interpolate:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow,
interpoldef)
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow)
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dNdx = xsection.calculate_dNdx(energy)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(dNdx))
buf.append(photo.name)
buf.append(shadow.name)
buf.append("\n")
file.write("\t".join(buf))
def create_table_dNdx_rnd_Q2(dir_name, interpolate=False):
pp.RandomGenerator.get().set_seed(1234)
if interpolate:
q2 = photo_q2_interpol
else:
q2 = photo_q2
with open(dir_name + "Photo_Q2_dNdx_rnd{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for shadow in shadows:
rnd = pp.RandomGenerator.get().random_double()
for parametrization in q2:
if interpolate:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow,
interpoldef)
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow)
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dNdx = xsection.calculate_dNdx_rnd(energy, rnd)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(rnd))
buf.append(str(dNdx))
buf.append(photo.name)
buf.append(shadow.name)
buf.append("\n")
file.write("\t".join(buf))
def create_table_stochastic_loss_Q2(dir_name, interpolate=False):
pp.RandomGenerator.get().set_seed(1234)
if interpolate:
q2 = photo_q2_interpol
else:
q2 = photo_q2
with open(dir_name + "Photo_Q2_e{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for shadow in shadows:
for parametrization in q2:
if interpolate:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow,
interpoldef)
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow)
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
rnd1 = pp.RandomGenerator.get().random_double()
rnd2 = pp.RandomGenerator.get().random_double()
stochastic_loss = xsection.calculate_stochastic_loss(energy, rnd1, rnd2)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(rnd1))
buf.append(str(rnd2))
buf.append(str(stochastic_loss))
buf.append(photo.name)
buf.append(shadow.name)
buf.append("\n")
file.write("\t".join(buf))
def main(dir_name):
# Integrate
create_table_dEdx(dir_name)
create_table_dNdx(dir_name)
create_table_dNdx_rnd(dir_name)
create_table_stochastic_loss(dir_name)
create_table_dEdx_Q2(dir_name)
create_table_dNdx_Q2(dir_name)
create_table_dNdx_rnd_Q2(dir_name)
create_table_stochastic_loss_Q2(dir_name)
# Interpolate
create_table_dEdx(dir_name, True)
create_table_dNdx(dir_name, True)
create_table_dNdx_rnd(dir_name, True)
create_table_stochastic_loss(dir_name, True)
create_table_dEdx_Q2(dir_name, True)
create_table_dNdx_Q2(dir_name, True)
create_table_dNdx_rnd_Q2(dir_name, True)
create_table_stochastic_loss_Q2(dir_name, True)
if __name__ == "__main__":
import os
dir_name = "TestFiles/"
if os.path.isdir(dir_name):
print("Directory {} already exists".format(dir_name))
else:
os.makedirs(dir_name)
print("Directory {} created".format(dir_name))
main(dir_name)
| 37.681542
| 110
| 0.425149
| 1,446
| 18,577
| 5.320194
| 0.080221
| 0.100611
| 0.077993
| 0.031197
| 0.834655
| 0.825426
| 0.804108
| 0.77837
| 0.77252
| 0.763421
| 0
| 0.01126
| 0.502826
| 18,577
| 492
| 111
| 37.75813
| 0.821676
| 0.002745
| 0
| 0.777188
| 0
| 0
| 0.018303
| 0.004859
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023873
| false
| 0
| 0.007958
| 0
| 0.03183
| 0.005305
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
53c9452719a80fa80781cf22dc927621583f63bc
| 12,322
|
py
|
Python
|
unittest/scripts/py_devapi/validation/mysqlx_collection_remove_prepared.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 119
|
2016-04-14T14:16:22.000Z
|
2022-03-08T20:24:38.000Z
|
unittest/scripts/py_devapi/validation/mysqlx_collection_remove_prepared.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 9
|
2017-04-26T20:48:42.000Z
|
2021-09-07T01:52:44.000Z
|
unittest/scripts/py_devapi/validation/mysqlx_collection_remove_prepared.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 51
|
2016-07-20T05:06:48.000Z
|
2022-03-09T01:20:53.000Z
|
#@<PROTOCOL> First execution is normal
>>>> SEND Mysqlx.Crud.Delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
}
#@<OUT> First execution is normal
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Second execution prepares statement and executes it
>>>> SEND Mysqlx.Prepare.Prepare {
stmt_id: 1
stmt {
type: DELETE
delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
}
}
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 1
}
#@<OUT> Second execution prepares statement and executes it
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Third execution uses prepared statement
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 1
}
#@<OUT> Third execution uses prepared statement
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> sort() changes statement, back to normal execution
>>>> SEND Mysqlx.Prepare.Deallocate {
stmt_id: 1
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Crud.Delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
order {
expr {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "name"
}
}
}
direction: DESC
}
}
#@<OUT> sort() changes statement, back to normal execution
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> second execution after sort(), prepares statement and executes it
>>>> SEND Mysqlx.Prepare.Prepare {
stmt_id: 2
stmt {
type: DELETE
delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
order {
expr {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "name"
}
}
}
direction: DESC
}
}
}
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 2
}
#@<OUT> second execution after sort(), prepares statement and executes it
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> third execution after set(), uses prepared statement
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 2
}
#@<OUT> third execution after set(), uses prepared statement
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> limit() changes statement, back to normal execution
>>>> SEND Mysqlx.Prepare.Deallocate {
stmt_id: 2
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Crud.Delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
limit {
row_count: 1
}
order {
expr {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "name"
}
}
}
direction: DESC
}
}
#@<OUT> limit() changes statement, back to normal execution
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> second execution after limit(), prepares statement and executes it
>>>> SEND Mysqlx.Prepare.Prepare {
stmt_id: 3
stmt {
type: DELETE
delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "=="
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: LITERAL
literal {
type: V_OCTETS
v_octets {
value: "001"
}
}
}
}
}
order {
expr {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "name"
}
}
}
direction: DESC
}
limit_expr {
row_count {
type: PLACEHOLDER
position: 0
}
}
}
}
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 3
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> second execution after limit(), prepares statement and executes it
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> third execution after limit(), uses prepared statement
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 3
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> third execution after limit(), uses prepared statement
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Prepares statement to test re-usability of bind() and limit()
>>>> SEND Mysqlx.Crud.Delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "like"
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: PLACEHOLDER
position: 0
}
}
}
limit {
row_count: 1
}
args {
type: V_STRING
v_string {
value: "001"
}
}
}
#@<OUT> Prepares statement to test re-usability of bind() and limit()
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Prepares and executes statement
>>>> SEND Mysqlx.Prepare.Prepare {
stmt_id: 4
stmt {
type: DELETE
delete {
collection {
name: "test_collection"
schema: "prepared_stmt"
}
data_model: DOCUMENT
criteria {
type: OPERATOR
operator {
name: "like"
param {
type: IDENT
identifier {
document_path {
type: MEMBER
value: "_id"
}
}
}
param {
type: PLACEHOLDER
position: 0
}
}
}
limit_expr {
row_count {
type: PLACEHOLDER
position: 1
}
}
}
}
}
<<<< RECEIVE Mysqlx.Ok {
}
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 4
args {
type: SCALAR
scalar {
type: V_STRING
v_string {
value: "002"
}
}
}
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> Prepares and executes statement
Query OK, 1 item affected ([[*]] sec)
{
"_id": "001",
"age": 18,
"name": "george"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Executes prepared statement with bind()
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 4
args {
type: SCALAR
scalar {
type: V_STRING
v_string {
value: "003"
}
}
}
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> Executes prepared statement with bind()
Query OK, 1 item affected ([[*]] sec)
{
"_id": "001",
"age": 18,
"name": "george"
}
{
"_id": "002",
"age": 17,
"name": "james"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Executes prepared statement with limit(1)
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 4
args {
type: SCALAR
scalar {
type: V_STRING
v_string {
value: "%"
}
}
}
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 1
}
}
}
#@<OUT> Executes prepared statement with limit(1)
Query OK, 1 item affected ([[*]] sec)
{
"_id": "002",
"age": 17,
"name": "james"
}
{
"_id": "003",
"age": 18,
"name": "luke"
}
2 documents in set ([[*]] sec)
Query OK, 1 item affected ([[*]] sec)
#@<PROTOCOL> Executes prepared statement with limit(2)
>>>> SEND Mysqlx.Prepare.Execute {
stmt_id: 4
args {
type: SCALAR
scalar {
type: V_STRING
v_string {
value: "%"
}
}
}
args {
type: SCALAR
scalar {
type: V_UINT
v_unsigned_int: 2
}
}
}
#@<OUT> Executes prepared statement with limit(2)
Query OK, 2 items affected ([[*]] sec)
{
"_id": "003",
"age": 18,
"name": "luke"
}
1 document in set ([[*]] sec)
| 17.428571
| 79
| 0.481902
| 1,240
| 12,322
| 4.681452
| 0.075
| 0.032558
| 0.035831
| 0.053747
| 0.96882
| 0.96503
| 0.941602
| 0.913695
| 0.909388
| 0.857537
| 0
| 0.030925
| 0.383298
| 12,322
| 706
| 80
| 17.453258
| 0.732991
| 0.129687
| 0
| 0.669811
| 0
| 0
| 0.073679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|